// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

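/*
 * Layout note (derived from the users of these macros below): an entry
 * probe records a single vaddr slot holding the instruction pointer,
 * while a return probe records two -- vaddr[0] is the probed function's
 * entry address and vaddr[1] is the return address -- hence the
 * "is_return ? 2 : 1" above.  Fetched argument data is stored right
 * after the vaddr slots, which is where DATAOF_TRACE_ENTRY() points.
 */
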
static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

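/*
 * FETCH_OP_FOFFS arguments are encoded as offsets within the probed file.
 * Recover the load base of the current mapping from the breakpoint address
 * minus the probe's own file offset, then add @file_offset to get the
 * virtual address to fetch from (see process_fetch_insn() below).
 */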
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
			 (int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
			 (int)(sizeof(void *) * 2), tu->offset,
			 tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	    trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare
		 * comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument indices start at 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well.  There is one exception: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), we allow the same
 * uprobe with a new reference counter as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
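/*
 * For illustration only (the path, offset and fetch args below are
 * hypothetical), a probe and a return probe can be added through the
 * tracefs uprobe_events file using the syntax above:
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ip %ax' >> \
 *		/sys/kernel/tracing/uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval' >> \
 *		/sys/kernel/tracing/uprobe_events
 */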
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Check if there is %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}

Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001270static int uprobe_perf_close(struct trace_event_call *call,
1271 struct perf_event *event)
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001272{
1273 struct trace_probe *pos, *tp;
1274 struct trace_uprobe *tu;
1275 int ret = 0;
1276
1277 tp = trace_probe_primary_from_call(call);
1278 if (WARN_ON_ONCE(!tp))
1279 return -ENODEV;
1280
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001281 tu = container_of(tp, struct trace_uprobe, tp);
Masami Hiramatsub61387c2020-01-22 12:23:25 +09001282 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001283 return 0;
1284
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001285 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1286 tu = container_of(pos, struct trace_uprobe, tp);
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001287 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001288 if (ret)
1289 break;
1290 }
1291
1292 return ret;
1293}
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001294
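/*
 * Perf is attaching @event: record it in the shared filter and, unless the
 * filter already guarantees the breakpoints are installed, apply each
 * sibling probe. On failure the partial registration is rolled back via
 * uprobe_perf_close().
 */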
1295static int uprobe_perf_open(struct trace_event_call *call,
1296 struct perf_event *event)
1297{
1298 struct trace_probe *pos, *tp;
1299 struct trace_uprobe *tu;
1300 int err = 0;
1301
1302 tp = trace_probe_primary_from_call(call);
1303 if (WARN_ON_ONCE(!tp))
1304 return -ENODEV;
1305
1306 tu = container_of(tp, struct trace_uprobe, tp);
Masami Hiramatsub61387c2020-01-22 12:23:25 +09001307 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001308 return 0;
1309
1310 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1311 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1312 if (err) {
1313 uprobe_perf_close(call, event);
1314 break;
1315 }
1316 }
1317
1318 return err;
1319}
1320
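/*
 * Consumer callback used by the uprobe core to decide whether a breakpoint
 * should be installed into (or kept in) @mm; it simply consults the
 * per-event filter under its read lock.
 */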
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001321static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1322 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1323{
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001324 struct trace_uprobe_filter *filter;
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001325 struct trace_uprobe *tu;
1326 int ret;
1327
1328 tu = container_of(uc, struct trace_uprobe, consumer);
Masami Hiramatsub61387c2020-01-22 12:23:25 +09001329 filter = tu->tp.event->filter;
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001330
1331 read_lock(&filter->rwlock);
1332 ret = __uprobe_perf_filter(filter, mm);
1333 read_unlock(&filter->rwlock);
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001334
1335 return ret;
1336}
1337
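/*
 * Core perf path: give an attached BPF program first refusal, then copy the
 * already-fetched probe arguments from the per-CPU buffer into a perf trace
 * record and submit it to the perf subsystem.
 */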
Namhyung Kima43b9702014-01-17 17:08:36 +09001338static void __uprobe_perf_func(struct trace_uprobe *tu,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001339 unsigned long func, struct pt_regs *regs,
1340 struct uprobe_cpu_buffer *ucb, int dsize)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301341{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001342 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301343 struct uprobe_trace_entry_head *entry;
1344 struct hlist_head *head;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001345 void *data;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001346 int size, esize;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001347 int rctx;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301348
Alexei Starovoitov70ed0702020-02-24 11:27:15 -08001349 if (bpf_prog_array_valid(call)) {
1350 u32 ret;
1351
1352 preempt_disable();
1353 ret = trace_call_bpf(call, regs);
1354 preempt_enable();
1355 if (!ret)
1356 return;
1357 }
Wang Nan04a22fa2015-07-01 02:13:50 +00001358
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001359 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1360
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001361 size = esize + tu->tp.size + dsize;
1362 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1363 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1364 return;
1365
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301366 preempt_disable();
Oleg Nesterov515619f2013-04-13 15:36:49 +02001367 head = this_cpu_ptr(call->perf_events);
1368 if (hlist_empty(head))
1369 goto out;
1370
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001371 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301372 if (!entry)
1373 goto out;
1374
Oleg Nesterov393a7362013-03-30 18:46:22 +01001375 if (is_ret_probe(tu)) {
1376 entry->vaddr[0] = func;
Oleg Nesterov32520b22013-04-10 16:25:49 +02001377 entry->vaddr[1] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001378 data = DATAOF_TRACE_ENTRY(entry, true);
1379 } else {
Oleg Nesterov32520b22013-04-10 16:25:49 +02001380 entry->vaddr[0] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001381 data = DATAOF_TRACE_ENTRY(entry, false);
1382 }
1383
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001384 memcpy(data, ucb->buf, tu->tp.size + dsize);
Namhyung Kim14577c32013-07-03 15:42:53 +09001385
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001386 if (size - esize > tu->tp.size + dsize) {
1387 int len = tu->tp.size + dsize;
1388
1389 memset(data + len, 0, size - esize - len);
Namhyung Kim14577c32013-07-03 15:42:53 +09001390 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301391
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001392 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001393 head, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301394 out:
1395 preempt_enable();
Oleg Nesterova51cc602013-03-30 18:02:12 +01001396}
1397
1398/* uprobe profile handler */
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001399static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1400 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterova51cc602013-03-30 18:02:12 +01001401{
1402 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1403 return UPROBE_HANDLER_REMOVE;
1404
Oleg Nesterov393a7362013-03-30 18:46:22 +01001405 if (!is_ret_probe(tu))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001406 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001407 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301408}
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001409
1410static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001411 struct pt_regs *regs,
1412 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001413{
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001414 __uprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001415}
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001416
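/*
 * Report the file, offset and probe type backing a perf uprobe event; used
 * by the BPF task-fd-query interface (via bpf_get_perf_event_info(), outside
 * this file) to describe attached probes.
 */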
1417int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1418 const char **filename, u64 *probe_offset,
1419 bool perf_type_tracepoint)
1420{
1421 const char *pevent = trace_event_name(event->tp_event);
1422 const char *group = event->tp_event->class->system;
1423 struct trace_uprobe *tu;
1424
1425 if (perf_type_tracepoint)
1426 tu = find_probe_event(pevent, group);
1427 else
Jean-Philippe Brucker22d5bd62020-06-08 14:45:32 +02001428 tu = trace_uprobe_primary_from_call(event->tp_event);
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001429 if (!tu)
1430 return -EINVAL;
1431
1432 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1433 : BPF_FD_TYPE_UPROBE;
1434 *filename = tu->filename;
1435 *probe_offset = tu->offset;
1436 return 0;
1437}
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301438#endif /* CONFIG_PERF_EVENTS */
1439
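/*
 * ->class->reg callback of the trace_event_call: multiplexes enable/disable
 * requests from ftrace and perf onto probe_event_enable()/disable() and the
 * perf open/close filter hooks above.
 */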
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001440static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001441trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001442 void *data)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301443{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001444 struct trace_event_file *file = data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301445
1446 switch (type) {
1447 case TRACE_REG_REGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001448 return probe_event_enable(event, file, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301449
1450 case TRACE_REG_UNREGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001451 probe_event_disable(event, file);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301452 return 0;
1453
1454#ifdef CONFIG_PERF_EVENTS
1455 case TRACE_REG_PERF_REGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001456 return probe_event_enable(event, NULL, uprobe_perf_filter);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301457
1458 case TRACE_REG_PERF_UNREGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001459 probe_event_disable(event, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301460 return 0;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001461
1462 case TRACE_REG_PERF_OPEN:
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001463 return uprobe_perf_open(event, data);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001464
1465 case TRACE_REG_PERF_CLOSE:
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001466 return uprobe_perf_close(event, data);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001467
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301468#endif
1469 default:
1470 return 0;
1471 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301472}
1473
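/*
 * Breakpoint-hit handler registered as the uprobe consumer: fetch the probe
 * arguments once into a per-CPU buffer, then hand them to the ftrace and/or
 * perf handlers depending on which flags are set on the probe.
 */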
1474static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1475{
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301476 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001477 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001478 struct uprobe_cpu_buffer *ucb;
1479 int dsize, esize;
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001480 int ret = 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301481
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001482
Oleg Nesterova932b732013-01-31 19:47:23 +01001483 tu = container_of(con, struct trace_uprobe, consumer);
Oleg Nesterov1b47aef2013-01-31 19:55:27 +01001484 tu->nhit++;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301485
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001486 udd.tu = tu;
1487 udd.bp_addr = instruction_pointer(regs);
1488
1489 current->utask->vaddr = (unsigned long) &udd;
1490
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001491 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1492 return 0;
1493
1494 dsize = __get_data_size(&tu->tp, regs);
1495 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1496
1497 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001498 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001499
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001500 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001501 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301502
1503#ifdef CONFIG_PERF_EVENTS
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001504 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001505 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301506#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001507 uprobe_buffer_put(ucb);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001508 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301509}
1510
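/*
 * Return-probe counterpart of uprobe_dispatcher(): invoked when the probed
 * function returns, with @func holding the address of the probed function.
 */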
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001511static int uretprobe_dispatcher(struct uprobe_consumer *con,
1512 unsigned long func, struct pt_regs *regs)
1513{
1514 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001515 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001516 struct uprobe_cpu_buffer *ucb;
1517 int dsize, esize;
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001518
1519 tu = container_of(con, struct trace_uprobe, consumer);
1520
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001521 udd.tu = tu;
1522 udd.bp_addr = func;
1523
1524 current->utask->vaddr = (unsigned long) &udd;
1525
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001526 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1527 return 0;
1528
1529 dsize = __get_data_size(&tu->tp, regs);
1530 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1531
1532 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001533 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001534
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001535 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001536 uretprobe_trace_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001537
1538#ifdef CONFIG_PERF_EVENTS
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001539 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001540 uretprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001541#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001542 uprobe_buffer_put(ucb);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001543 return 0;
1544}
1545
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301546static struct trace_event_functions uprobe_funcs = {
1547 .trace = print_uprobe_event
1548};
1549
Peter Zijlstra04ae87a2019-10-24 22:26:59 +02001550static struct trace_event_fields uprobe_fields_array[] = {
1551 { .type = TRACE_FUNCTION_TYPE,
1552 .define_fields = uprobe_event_define_fields },
1553 {}
1554};
1555
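/*
 * Wire up the output formatting, field definitions and registration callback
 * for the trace_event_call embedded in this probe.
 */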
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001556static inline void init_trace_event_call(struct trace_uprobe *tu)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301557{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001558 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301559 call->event.funcs = &uprobe_funcs;
Peter Zijlstra04ae87a2019-10-24 22:26:59 +02001560 call->class->fields_array = uprobe_fields_array;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301561
Song Liu9fd2e482019-05-07 09:15:45 -07001562 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
Song Liu33ea4b22017-12-06 14:45:16 -08001563 call->class->reg = trace_uprobe_register;
Song Liu33ea4b22017-12-06 14:45:16 -08001564}
1565
1566static int register_uprobe_event(struct trace_uprobe *tu)
1567{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001568 init_trace_event_call(tu);
Song Liu33ea4b22017-12-06 14:45:16 -08001569
Masami Hiramatsu46e53762019-06-01 00:17:16 +09001570 return trace_probe_register_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301571}
1572
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001573static int unregister_uprobe_event(struct trace_uprobe *tu)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301574{
Masami Hiramatsu46e53762019-06-01 00:17:16 +09001575 return trace_probe_unregister_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301576}
1577
Song Liu33ea4b22017-12-06 14:45:16 -08001578#ifdef CONFIG_PERF_EVENTS
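/*
 * Create an event call for a "local" uprobe, i.e. one owned by a single perf
 * event (the perf "uprobe" PMU) rather than created through the
 * uprobe_events interface; such probes are never added to dyn_event.
 */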
1579struct trace_event_call *
Song Liua6ca88b2018-10-01 22:36:36 -07001580create_local_trace_uprobe(char *name, unsigned long offs,
1581 unsigned long ref_ctr_offset, bool is_return)
Song Liu33ea4b22017-12-06 14:45:16 -08001582{
1583 struct trace_uprobe *tu;
Song Liu33ea4b22017-12-06 14:45:16 -08001584 struct path path;
1585 int ret;
1586
1587 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1588 if (ret)
1589 return ERR_PTR(ret);
1590
Song Liu0c92c7a2018-04-23 10:21:34 -07001591 if (!d_is_reg(path.dentry)) {
1592 path_put(&path);
Song Liu33ea4b22017-12-06 14:45:16 -08001593 return ERR_PTR(-EINVAL);
1594 }
1595
1596 /*
Masami Hiramatsu0597c492018-11-05 18:03:04 +09001597 * local trace_uprobes are not added to dyn_event, so they are never
Song Liu33ea4b22017-12-06 14:45:16 -08001598 * searched in find_probe_event(). Therefore, there is no concern of
1599 * duplicated name "DUMMY_EVENT" here.
1600 */
1601 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1602 is_return);
1603
1604 if (IS_ERR(tu)) {
1605 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1606 (int)PTR_ERR(tu));
Song Liu0c92c7a2018-04-23 10:21:34 -07001607 path_put(&path);
Song Liu33ea4b22017-12-06 14:45:16 -08001608 return ERR_CAST(tu);
1609 }
1610
1611 tu->offset = offs;
Song Liu0c92c7a2018-04-23 10:21:34 -07001612 tu->path = path;
Song Liua6ca88b2018-10-01 22:36:36 -07001613 tu->ref_ctr_offset = ref_ctr_offset;
Song Liu33ea4b22017-12-06 14:45:16 -08001614 tu->filename = kstrdup(name, GFP_KERNEL);
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001615 init_trace_event_call(tu);
Song Liu33ea4b22017-12-06 14:45:16 -08001616
Masami Hiramatsu0a46c852018-04-25 21:19:30 +09001617 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
Song Liu33ea4b22017-12-06 14:45:16 -08001618 ret = -ENOMEM;
1619 goto error;
1620 }
1621
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001622 return trace_probe_event_call(&tu->tp);
Song Liu33ea4b22017-12-06 14:45:16 -08001623error:
1624 free_trace_uprobe(tu);
1625 return ERR_PTR(ret);
1626}
1627
1628void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1629{
1630 struct trace_uprobe *tu;
1631
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001632 tu = trace_uprobe_primary_from_call(event_call);
Song Liu33ea4b22017-12-06 14:45:16 -08001633
Song Liu33ea4b22017-12-06 14:45:16 -08001634 free_trace_uprobe(tu);
1635}
1636#endif /* CONFIG_PERF_EVENTS */
1637
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301638/* Make a trace interface for controlling probe points */
1639static __init int init_uprobe_trace(void)
1640{
Masami Hiramatsu0597c492018-11-05 18:03:04 +09001641 int ret;
1642
1643 ret = dyn_event_register(&trace_uprobe_ops);
1644 if (ret)
1645 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301646
Wei Yang22c36b12020-07-12 09:10:36 +08001647 ret = tracing_init_dentry();
1648 if (ret)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301649 return 0;
1650
Wei Yang22c36b12020-07-12 09:10:36 +08001651 trace_create_file("uprobe_events", 0644, NULL,
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301652 NULL, &uprobe_events_ops);
1653 /* Profile interface */
Wei Yang22c36b12020-07-12 09:10:36 +08001654 trace_create_file("uprobe_profile", 0444, NULL,
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301655 NULL, &uprobe_profile_ops);
1656 return 0;
1657}
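/*
 * Example usage (a sketch; the exact mount point depends on the system):
 * with tracefs mounted at /sys/kernel/tracing, a probe on a user binary can
 * be created and enabled with
 *
 *	echo 'p:myevent /bin/bash:0x4245c0' >> uprobe_events
 *	echo 1 > events/uprobes/myevent/enable
 *
 * where the event name, binary path and file offset are placeholders chosen
 * for illustration.
 */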
1658
1659fs_initcall(init_uprobe_trace);