// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include "trace_kprobe_selftest.h"
#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
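/*
 * KRETPROBE_MAXACTIVE_MAX caps the user-supplied maxactive value of a
 * kretprobe event (the "r<MAXACTIVE>:" form parsed by create_trace_kprobe()
 * below). Kretprobe instances are kept on a list, so this limit keeps that
 * list at a reasonable size.
 */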
#define KRETPROBE_MAXACTIVE_MAX 4096

/**
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

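/*
 * Allocation size of a trace_kprobe carrying n probe arguments: the fixed
 * part of the structure plus n probe_arg slots appended at tp.args[]
 * (see alloc_trace_kprobe() below).
 */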
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return within_error_injection_list(trace_kprobe_address(tk));
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of string -- including null terminal byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
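/*
 * This table maps the ":TYPE" suffix of a FETCHARG to its fetch and print
 * handlers. The x8/x16/x32/x64 entries alias the corresponding unsigned
 * types so the fetched value is printed in hexadecimal.
 */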

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link;
	int ret = 0;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
		ret = __enable_trace_kprobe(tk);
		if (ret) {
			list_del_rcu(&link->list);
			kfree(link);
			tk->tp.flags &= ~TP_FLAG_TRACE;
		}

	} else {
		tk->tp.flags |= TP_FLAG_PROFILE;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			tk->tp.flags &= ~TP_FLAG_PROFILE;
	}
 out:
	return ret;
}

/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}

	/*
	 * if tk is not added to any list, it must be a local trace_kprobe
	 * created with perf_event_open. We don't need to wait for these
	 * trace_kprobes
	 */
	if (list_empty(&tk->list))
		wait = 0;
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

#if defined(CONFIG_KPROBES_ON_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	return !ftrace_location_range(addr - offset, addr - offset + size);
}
#else
#define within_notrace_func(tk)	(false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
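	/*
	 * Illustrative examples only (event names are arbitrary and register
	 * names/offsets are architecture dependent; see the kprobetrace
	 * documentation for the authoritative syntax):
	 *   p:myprobe do_sys_open dfd=%ax filename=+0(%si):string
	 *   r:myretprobe do_sys_open $retval
	 */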
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/* try to parse an address. if that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
1179 for (i = 0; i < tp->nr_args; i++)
1180 if (!tp->args[i].type->print(s, tp->args[i].name,
Masami Hiramatsue09c8612010-07-05 15:54:45 -03001181 data + tp->args[i].offset, field))
Steven Rostedt (Red Hat)85224da2014-11-12 15:18:16 -05001182 goto out;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001183
Steven Rostedt (Red Hat)85224da2014-11-12 15:18:16 -05001184 trace_seq_putc(s, '\n');
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001185
Steven Rostedt (Red Hat)85224da2014-11-12 15:18:16 -05001186 out:
1187 return trace_handle_return(s);
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001188}
1189
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001190
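/*
 * Describe the event's layout to the trace core: the probe's fixed fields
 * plus one field per fetched argument. The per-event "format" file is
 * generated from these definitions.
 */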
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001191static int kprobe_event_define_fields(struct trace_event_call *event_call)
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001192{
1193 int ret, i;
Masami Hiramatsu93ccae72010-04-12 13:17:08 -04001194 struct kprobe_trace_entry_head field;
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001195 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001196
Masami Hiramatsua703d942009-10-07 18:28:07 -04001197 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
Masami Hiramatsueca0d912009-09-10 19:53:38 -04001198 /* Set argument names as fields */
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001199 for (i = 0; i < tk->tp.nr_args; i++) {
1200 struct probe_arg *parg = &tk->tp.args[i];
1201
1202 ret = trace_define_field(event_call, parg->type->fmttype,
1203 parg->name,
1204 sizeof(field) + parg->offset,
1205 parg->type->size,
1206 parg->type->is_signed,
Masami Hiramatsu93ccae72010-04-12 13:17:08 -04001207 FILTER_OTHER);
1208 if (ret)
1209 return ret;
1210 }
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001211 return 0;
1212}
1213
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001214static int kretprobe_event_define_fields(struct trace_event_call *event_call)
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001215{
1216 int ret, i;
Masami Hiramatsu93ccae72010-04-12 13:17:08 -04001217 struct kretprobe_trace_entry_head field;
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001218 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001219
Masami Hiramatsua703d942009-10-07 18:28:07 -04001220 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1221 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
Masami Hiramatsueca0d912009-09-10 19:53:38 -04001222 /* Set argument names as fields */
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001223 for (i = 0; i < tk->tp.nr_args; i++) {
1224 struct probe_arg *parg = &tk->tp.args[i];
1225
1226 ret = trace_define_field(event_call, parg->type->fmttype,
1227 parg->name,
1228 sizeof(field) + parg->offset,
1229 parg->type->size,
1230 parg->type->is_signed,
Masami Hiramatsu93ccae72010-04-12 13:17:08 -04001231 FILTER_OTHER);
1232 if (ret)
1233 return ret;
1234 }
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001235 return 0;
1236}
1237
Li Zefan07b139c2009-12-21 14:27:35 +08001238#ifdef CONFIG_PERF_EVENTS
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001239
1240/* Kprobe profile handler */
Josef Bacik9802d862017-12-11 11:36:48 -05001241static int
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001242kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001243{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001244 struct trace_event_call *call = &tk->tp.call;
Masami Hiramatsu93ccae72010-04-12 13:17:08 -04001245 struct kprobe_trace_entry_head *entry;
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02001246 struct hlist_head *head;
Masami Hiramatsue09c8612010-07-05 15:54:45 -03001247 int size, __size, dsize;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01001248 int rctx;
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001249
Josef Bacik9802d862017-12-11 11:36:48 -05001250 if (bpf_prog_array_valid(call)) {
Masami Hiramatsu66665ad2018-01-13 02:54:33 +09001251 unsigned long orig_ip = instruction_pointer(regs);
Josef Bacik9802d862017-12-11 11:36:48 -05001252 int ret;
1253
1254 ret = trace_call_bpf(call, regs);
1255
1256 /*
1257		 * Check whether we modified the pc of the pt_regs; if so,
1258		 * clear the current kprobe and return 1 so that we skip
Masami Hiramatsu66665ad2018-01-13 02:54:33 +09001259		 * the single stepping.
1260 * The ftrace kprobe handler leaves it up to us to re-enable
1261 * preemption here before returning if we've modified the ip.
Josef Bacik9802d862017-12-11 11:36:48 -05001262 */
Masami Hiramatsu66665ad2018-01-13 02:54:33 +09001263 if (orig_ip != instruction_pointer(regs)) {
Josef Bacik9802d862017-12-11 11:36:48 -05001264 reset_current_kprobe();
Masami Hiramatsu66665ad2018-01-13 02:54:33 +09001265 preempt_enable_no_resched();
Josef Bacik9802d862017-12-11 11:36:48 -05001266 return 1;
1267 }
1268 if (!ret)
1269 return 0;
1270 }
Alexei Starovoitov25415172015-03-25 12:49:20 -07001271
Oleg Nesterov288e9842013-06-20 19:38:06 +02001272 head = this_cpu_ptr(call->perf_events);
1273 if (hlist_empty(head))
Josef Bacik9802d862017-12-11 11:36:48 -05001274 return 0;
Oleg Nesterov288e9842013-06-20 19:38:06 +02001275
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001276 dsize = __get_data_size(&tk->tp, regs);
1277 __size = sizeof(*entry) + tk->tp.size + dsize;
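	/*
	 * Round the record so that it plus the u32 size word that precedes
	 * perf raw sample data stays u64-aligned.
	 */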
Masami Hiramatsu74ebb632009-09-14 16:49:28 -04001278 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1279 size -= sizeof(u32);
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001280
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001281 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Xiao Guangrong430ad5a2010-01-28 09:32:29 +08001282 if (!entry)
Josef Bacik9802d862017-12-11 11:36:48 -05001283 return 0;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01001284
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001285 entry->ip = (unsigned long)tk->rp.kp.addr;
Masami Hiramatsue09c8612010-07-05 15:54:45 -03001286 memset(&entry[1], 0, dsize);
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001287 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001288 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001289 head, NULL);
Josef Bacik9802d862017-12-11 11:36:48 -05001290 return 0;
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001291}
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001292NOKPROBE_SYMBOL(kprobe_perf_func);
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001293
1294/* Kretprobe profile handler */
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001295static void
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001296kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
Masami Hiramatsu2b106aa2013-05-09 14:44:41 +09001297 struct pt_regs *regs)
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001298{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001299 struct trace_event_call *call = &tk->tp.call;
Masami Hiramatsu93ccae72010-04-12 13:17:08 -04001300 struct kretprobe_trace_entry_head *entry;
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02001301 struct hlist_head *head;
Masami Hiramatsue09c8612010-07-05 15:54:45 -03001302 int size, __size, dsize;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01001303 int rctx;
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001304
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001305 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
Alexei Starovoitov25415172015-03-25 12:49:20 -07001306 return;
1307
Oleg Nesterov288e9842013-06-20 19:38:06 +02001308 head = this_cpu_ptr(call->perf_events);
1309 if (hlist_empty(head))
1310 return;
1311
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001312 dsize = __get_data_size(&tk->tp, regs);
1313 __size = sizeof(*entry) + tk->tp.size + dsize;
Masami Hiramatsu74ebb632009-09-14 16:49:28 -04001314 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1315 size -= sizeof(u32);
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001316
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001317 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Xiao Guangrong430ad5a2010-01-28 09:32:29 +08001318 if (!entry)
Xiao Guangrong1e12a4a2010-01-28 09:34:27 +08001319 return;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01001320
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001321 entry->func = (unsigned long)tk->rp.kp.addr;
Masami Hiramatsua1a138d2009-09-25 11:20:12 -07001322 entry->ret_ip = (unsigned long)ri->ret_addr;
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001323 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001324 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001325 head, NULL);
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001326}
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001327NOKPROBE_SYMBOL(kretprobe_perf_func);
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001328
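/*
 * Report this probe's type (kprobe vs. kretprobe) and location (symbol+offset
 * or raw address) back to the bpf/perf event-query interface.
 */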
1329int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1330 const char **symbol, u64 *probe_offset,
1331 u64 *probe_addr, bool perf_type_tracepoint)
1332{
1333 const char *pevent = trace_event_name(event->tp_event);
1334 const char *group = event->tp_event->class->system;
1335 struct trace_kprobe *tk;
1336
1337 if (perf_type_tracepoint)
1338 tk = find_trace_kprobe(pevent, group);
1339 else
1340 tk = event->tp_event->data;
1341 if (!tk)
1342 return -EINVAL;
1343
1344 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1345 : BPF_FD_TYPE_KPROBE;
1346 if (tk->symbol) {
1347 *symbol = tk->symbol;
1348 *probe_offset = tk->rp.kp.offset;
1349 *probe_addr = 0;
1350 } else {
1351 *symbol = NULL;
1352 *probe_offset = 0;
1353 *probe_addr = (unsigned long)tk->rp.kp.addr;
1354 }
1355 return 0;
1356}
Li Zefan07b139c2009-12-21 14:27:35 +08001357#endif /* CONFIG_PERF_EVENTS */
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001358
Oleg Nesterov3fe3d612013-06-20 19:38:09 +02001359/*
1360 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1361 *
1362 * kprobe_trace_self_tests_init() calls enable_trace_kprobe()/disable_trace_kprobe()
1363 * locklessly, but we can't race with this __init function.
1364 */
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001365static int kprobe_register(struct trace_event_call *event,
Masami Hiramatsufbc19632014-04-17 17:18:00 +09001366 enum trace_reg type, void *data)
Steven Rostedt22392912010-04-21 12:27:06 -04001367{
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001368 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001369 struct trace_event_file *file = data;
Masami Hiramatsu1538f882011-06-27 16:26:44 +09001370
Steven Rostedt22392912010-04-21 12:27:06 -04001371 switch (type) {
1372 case TRACE_REG_REGISTER:
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001373 return enable_trace_kprobe(tk, file);
Steven Rostedt22392912010-04-21 12:27:06 -04001374 case TRACE_REG_UNREGISTER:
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001375 return disable_trace_kprobe(tk, file);
Steven Rostedt22392912010-04-21 12:27:06 -04001376
1377#ifdef CONFIG_PERF_EVENTS
1378 case TRACE_REG_PERF_REGISTER:
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001379 return enable_trace_kprobe(tk, NULL);
Steven Rostedt22392912010-04-21 12:27:06 -04001380 case TRACE_REG_PERF_UNREGISTER:
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001381 return disable_trace_kprobe(tk, NULL);
Jiri Olsaceec0b62012-02-15 15:51:49 +01001382 case TRACE_REG_PERF_OPEN:
1383 case TRACE_REG_PERF_CLOSE:
Jiri Olsa489c75c2012-02-15 15:51:50 +01001384 case TRACE_REG_PERF_ADD:
1385 case TRACE_REG_PERF_DEL:
Jiri Olsaceec0b62012-02-15 15:51:49 +01001386 return 0;
Steven Rostedt22392912010-04-21 12:27:06 -04001387#endif
1388 }
1389 return 0;
1390}
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001391
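/* Kprobe handler: count the hit, then hand it to ftrace and/or perf as enabled. */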
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001392static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001393{
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001394 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
Josef Bacik9802d862017-12-11 11:36:48 -05001395 int ret = 0;
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001396
Martin KaFai Laua7636d92016-02-03 12:28:28 -08001397 raw_cpu_inc(*tk->nhit);
Masami Hiramatsu48182bd2013-05-09 14:44:36 +09001398
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001399 if (tk->tp.flags & TP_FLAG_TRACE)
1400 kprobe_trace_func(tk, regs);
Li Zefan07b139c2009-12-21 14:27:35 +08001401#ifdef CONFIG_PERF_EVENTS
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001402 if (tk->tp.flags & TP_FLAG_PROFILE)
Josef Bacik9802d862017-12-11 11:36:48 -05001403 ret = kprobe_perf_func(tk, regs);
Li Zefan07b139c2009-12-21 14:27:35 +08001404#endif
Josef Bacik9802d862017-12-11 11:36:48 -05001405 return ret;
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001406}
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001407NOKPROBE_SYMBOL(kprobe_dispatcher);
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001408
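/* Kretprobe handler: the function-return counterpart of kprobe_dispatcher(). */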
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001409static int
1410kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001411{
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001412 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001413
Martin KaFai Laua7636d92016-02-03 12:28:28 -08001414 raw_cpu_inc(*tk->nhit);
Masami Hiramatsu48182bd2013-05-09 14:44:36 +09001415
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001416 if (tk->tp.flags & TP_FLAG_TRACE)
1417 kretprobe_trace_func(tk, ri, regs);
Li Zefan07b139c2009-12-21 14:27:35 +08001418#ifdef CONFIG_PERF_EVENTS
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001419 if (tk->tp.flags & TP_FLAG_PROFILE)
1420 kretprobe_perf_func(tk, ri, regs);
Li Zefan07b139c2009-12-21 14:27:35 +08001421#endif
Masami Hiramatsu50d78052009-09-14 16:49:20 -04001422	return 0;	/* We don't tweak the kernel, so just return 0 */
1423}
Masami Hiramatsu3da0f182014-04-17 17:18:28 +09001424NOKPROBE_SYMBOL(kretprobe_dispatcher);
Masami Hiramatsue08d1c62009-09-10 19:53:30 -04001425
Steven Rostedta9a57762010-04-22 18:46:14 -04001426static struct trace_event_functions kretprobe_funcs = {
1427 .trace = print_kretprobe_event
1428};
1429
1430static struct trace_event_functions kprobe_funcs = {
1431 .trace = print_kprobe_event
1432};
1433
Song Liue12f03d2017-12-06 14:45:15 -08001434static inline void init_trace_event_call(struct trace_kprobe *tk,
1435 struct trace_event_call *call)
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001436{
Li Zefanffb9f992010-05-24 16:24:52 +08001437 INIT_LIST_HEAD(&call->class->fields);
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001438 if (trace_kprobe_is_return(tk)) {
Steven Rostedt80decc72010-04-23 10:00:22 -04001439 call->event.funcs = &kretprobe_funcs;
Steven Rostedt2e33af02010-04-22 10:35:55 -04001440 call->class->define_fields = kretprobe_event_define_fields;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001441 } else {
Steven Rostedt80decc72010-04-23 10:00:22 -04001442 call->event.funcs = &kprobe_funcs;
Steven Rostedt2e33af02010-04-22 10:35:55 -04001443 call->class->define_fields = kprobe_event_define_fields;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001444 }
Song Liue12f03d2017-12-06 14:45:15 -08001445
1446 call->flags = TRACE_EVENT_FL_KPROBE;
1447 call->class->reg = kprobe_register;
1448 call->data = tk;
1449}
1450
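/* Build the print format for this probe and register it as a trace event. */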
1451static int register_kprobe_event(struct trace_kprobe *tk)
1452{
1453 struct trace_event_call *call = &tk->tp.call;
1454 int ret = 0;
1455
1456 init_trace_event_call(tk, call);
1457
Namhyung Kim5bf652a2013-07-03 16:09:02 +09001458 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
Lai Jiangshana342a0282009-12-15 15:39:49 +08001459 return -ENOMEM;
Steven Rostedt (Red Hat)9023c932015-05-05 09:39:12 -04001460 ret = register_trace_event(&call->event);
Steven Rostedt32c0eda2010-04-23 10:38:03 -04001461 if (!ret) {
Lai Jiangshana342a0282009-12-15 15:39:49 +08001462 kfree(call->print_fmt);
Masami Hiramatsuff50d992009-08-13 16:35:34 -04001463 return -ENODEV;
Lai Jiangshana342a0282009-12-15 15:39:49 +08001464 }
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001465 ret = trace_add_event_call(call);
Masami Hiramatsuff50d992009-08-13 16:35:34 -04001466 if (ret) {
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001467 pr_info("Failed to register kprobe event: %s\n",
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04001468 trace_event_name(call));
Lai Jiangshana342a0282009-12-15 15:39:49 +08001469 kfree(call->print_fmt);
Steven Rostedt (Red Hat)9023c932015-05-05 09:39:12 -04001470 unregister_trace_event(&call->event);
Masami Hiramatsuff50d992009-08-13 16:35:34 -04001471 }
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001472 return ret;
1473}
1474
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001475static int unregister_kprobe_event(struct trace_kprobe *tk)
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001476{
Steven Rostedt (Red Hat)40c32592013-07-03 23:33:50 -04001477 int ret;
1478
Masami Hiramatsuff50d992009-08-13 16:35:34 -04001479 /* tp->event is unregistered in trace_remove_event_call() */
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001480 ret = trace_remove_event_call(&tk->tp.call);
Steven Rostedt (Red Hat)40c32592013-07-03 23:33:50 -04001481 if (!ret)
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001482 kfree(tk->tp.call.print_fmt);
Steven Rostedt (Red Hat)40c32592013-07-03 23:33:50 -04001483 return ret;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001484}
1485
Song Liue12f03d2017-12-06 14:45:15 -08001486#ifdef CONFIG_PERF_EVENTS
1487/* create a trace_kprobe, but don't add it to global lists */
1488struct trace_event_call *
1489create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1490 bool is_return)
1491{
1492 struct trace_kprobe *tk;
1493 int ret;
1494 char *event;
1495
1496 /*
1497	 * Local trace_kprobes are not added to probe_list, so they are never
1498	 * found by find_trace_kprobe(). Therefore, there is no concern about
1499	 * duplicated names here.
1500 */
1501 event = func ? func : "DUMMY_EVENT";
1502
1503 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1504 offs, 0 /* maxactive */, 0 /* nargs */,
1505 is_return);
1506
1507 if (IS_ERR(tk)) {
1508		pr_info("Failed to allocate trace_probe (%d)\n",
1509 (int)PTR_ERR(tk));
1510 return ERR_CAST(tk);
1511 }
1512
1513 init_trace_event_call(tk, &tk->tp.call);
1514
1515 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1516 ret = -ENOMEM;
1517 goto error;
1518 }
1519
1520 ret = __register_trace_kprobe(tk);
Jiri Olsa0fc8c352018-07-09 16:19:06 +02001521 if (ret < 0) {
1522 kfree(tk->tp.call.print_fmt);
Song Liue12f03d2017-12-06 14:45:15 -08001523 goto error;
Jiri Olsa0fc8c352018-07-09 16:19:06 +02001524 }
Song Liue12f03d2017-12-06 14:45:15 -08001525
1526 return &tk->tp.call;
1527error:
1528 free_trace_kprobe(tk);
1529 return ERR_PTR(ret);
1530}
1531
1532void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1533{
1534 struct trace_kprobe *tk;
1535
1536 tk = container_of(event_call, struct trace_kprobe, tp.call);
1537
1538 if (trace_probe_is_enabled(&tk->tp)) {
1539 WARN_ON(1);
1540 return;
1541 }
1542
1543 __unregister_trace_kprobe(tk);
Jiri Olsa0fc8c352018-07-09 16:19:06 +02001544
1545 kfree(tk->tp.call.print_fmt);
Song Liue12f03d2017-12-06 14:45:15 -08001546 free_trace_kprobe(tk);
1547}
1548#endif /* CONFIG_PERF_EVENTS */
1549
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05001550/* Make a tracefs interface for controlling probe points */
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001551static __init int init_kprobe_trace(void)
1552{
1553 struct dentry *d_tracer;
1554 struct dentry *entry;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001555
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001556 if (register_module_notifier(&trace_kprobe_module_nb))
Masami Hiramatsu61424312011-06-27 16:26:56 +09001557 return -EINVAL;
1558
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001559 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05001560 if (IS_ERR(d_tracer))
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001561 return 0;
1562
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05001563 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001564 NULL, &kprobe_events_ops);
1565
Masami Hiramatsucd7e7bd2009-08-13 16:35:42 -04001566 /* Event list interface */
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001567 if (!entry)
Joe Perchesa395d6a2016-03-22 14:28:09 -07001568 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
Masami Hiramatsucd7e7bd2009-08-13 16:35:42 -04001569
1570 /* Profile interface */
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05001571 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
Masami Hiramatsucd7e7bd2009-08-13 16:35:42 -04001572 NULL, &kprobe_profile_ops);
1573
1574 if (!entry)
Joe Perchesa395d6a2016-03-22 14:28:09 -07001575 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001576 return 0;
1577}
1578fs_initcall(init_kprobe_trace);
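
/*
 * A rough sketch of exercising this interface from user space, modeled on
 * the self-test command strings below. The probe names, the target function
 * (do_sys_open) and the tracefs mount point are illustrative assumptions:
 *
 *   echo 'p:myprobe do_sys_open $stack0' >> /sys/kernel/tracing/kprobe_events
 *   echo 'r:myretprobe do_sys_open $retval' >> /sys/kernel/tracing/kprobe_events
 *   echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable
 *   cat /sys/kernel/tracing/trace
 *   echo 0 > /sys/kernel/tracing/events/kprobes/myprobe/enable
 *   echo '-:myprobe' >> /sys/kernel/tracing/kprobe_events
 */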
1579
1580
1581#ifdef CONFIG_FTRACE_STARTUP_TEST
Arnd Bergmann26a346f2017-02-01 17:57:56 +01001582static __init struct trace_event_file *
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001583find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001584{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001585 struct trace_event_file *file;
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001586
1587 list_for_each_entry(file, &tr->events, list)
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001588 if (file->event_call == &tk->tp.call)
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001589 return file;
1590
1591 return NULL;
1592}
1593
Oleg Nesterov3fe3d612013-06-20 19:38:09 +02001594/*
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001595 * Nobody but us can call enable_trace_kprobe()/disable_trace_kprobe() at this
Oleg Nesterov3fe3d612013-06-20 19:38:09 +02001596 * stage, so we can do this locklessly.
1597 */
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001598static __init int kprobe_trace_self_tests_init(void)
1599{
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001600 int ret, warn = 0;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001601 int (*target)(int, int, int, int, int, int);
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001602 struct trace_kprobe *tk;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001603 struct trace_event_file *file;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001604
Yoshihiro YUNOMAE748ec3a2014-06-06 07:35:20 +09001605 if (tracing_is_disabled())
1606 return -ENODEV;
1607
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001608 target = kprobe_trace_selftest_target;
1609
1610 pr_info("Testing kprobe tracing: ");
1611
Tom Zanussi7e465ba2017-09-22 14:58:20 -05001612 ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
1613 "$stack $stack0 +0($stack)",
1614 create_trace_kprobe);
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001615 if (WARN_ON_ONCE(ret)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001616 pr_warn("error on probing function entry.\n");
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001617 warn++;
1618 } else {
1619 /* Enable trace point */
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001620 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1621 if (WARN_ON_ONCE(tk == NULL)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001622 pr_warn("error on getting new probe.\n");
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001623 warn++;
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001624 } else {
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001625 file = find_trace_probe_file(tk, top_trace_array());
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001626 if (WARN_ON_ONCE(file == NULL)) {
1627 pr_warn("error on getting probe file.\n");
1628 warn++;
1629 } else
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001630 enable_trace_kprobe(tk, file);
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001631 }
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001632 }
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001633
Tom Zanussi7e465ba2017-09-22 14:58:20 -05001634 ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
1635 "$retval", create_trace_kprobe);
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001636 if (WARN_ON_ONCE(ret)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001637 pr_warn("error on probing function return.\n");
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001638 warn++;
1639 } else {
1640 /* Enable trace point */
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001641 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1642 if (WARN_ON_ONCE(tk == NULL)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001643 pr_warn("error on getting 2nd new probe.\n");
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001644 warn++;
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001645 } else {
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001646 file = find_trace_probe_file(tk, top_trace_array());
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001647 if (WARN_ON_ONCE(file == NULL)) {
1648 pr_warn("error on getting probe file.\n");
1649 warn++;
1650 } else
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001651 enable_trace_kprobe(tk, file);
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001652 }
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001653 }
1654
1655 if (warn)
1656 goto end;
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001657
1658 ret = target(1, 2, 3, 4, 5, 6);
1659
Marcin Nowakowskid4d7ccc2016-12-09 15:19:38 +01001660 /*
1661	 * Not expecting an error here; the check only prevents the
1662	 * optimizer from removing the call to target(), as otherwise there
1663	 * would be no side effects and the call would never be performed.
1664 */
1665 if (ret != 21)
1666 warn++;
1667
Masami Hiramatsu02ca1522011-10-04 19:44:38 +09001668	/* Disable trace points before removing them */
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001669 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1670 if (WARN_ON_ONCE(tk == NULL)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001671 pr_warn("error on getting test probe.\n");
Masami Hiramatsu02ca1522011-10-04 19:44:38 +09001672 warn++;
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001673 } else {
Marcin Nowakowskid4d7ccc2016-12-09 15:19:38 +01001674 if (trace_kprobe_nhit(tk) != 1) {
1675 pr_warn("incorrect number of testprobe hits\n");
1676 warn++;
1677 }
1678
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001679 file = find_trace_probe_file(tk, top_trace_array());
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001680 if (WARN_ON_ONCE(file == NULL)) {
1681 pr_warn("error on getting probe file.\n");
1682 warn++;
1683 } else
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001684 disable_trace_kprobe(tk, file);
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001685 }
Masami Hiramatsu02ca1522011-10-04 19:44:38 +09001686
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001687 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1688 if (WARN_ON_ONCE(tk == NULL)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001689 pr_warn("error on getting 2nd test probe.\n");
Masami Hiramatsu02ca1522011-10-04 19:44:38 +09001690 warn++;
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001691 } else {
Marcin Nowakowskid4d7ccc2016-12-09 15:19:38 +01001692 if (trace_kprobe_nhit(tk) != 1) {
1693 pr_warn("incorrect number of testprobe2 hits\n");
1694 warn++;
1695 }
1696
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001697 file = find_trace_probe_file(tk, top_trace_array());
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001698 if (WARN_ON_ONCE(file == NULL)) {
1699 pr_warn("error on getting probe file.\n");
1700 warn++;
1701 } else
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001702 disable_trace_kprobe(tk, file);
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001703 }
Masami Hiramatsu02ca1522011-10-04 19:44:38 +09001704
Tom Zanussi7e465ba2017-09-22 14:58:20 -05001705 ret = trace_run_command("-:testprobe", create_trace_kprobe);
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001706 if (WARN_ON_ONCE(ret)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001707 pr_warn("error on deleting a probe.\n");
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001708 warn++;
1709 }
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001710
Tom Zanussi7e465ba2017-09-22 14:58:20 -05001711 ret = trace_run_command("-:testprobe2", create_trace_kprobe);
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001712 if (WARN_ON_ONCE(ret)) {
Masami Hiramatsu41a7dd42013-05-09 14:44:49 +09001713 pr_warn("error on deleting a probe.\n");
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001714 warn++;
1715 }
1716
1717end:
Namhyung Kimc31ffb32013-07-03 13:50:51 +09001718 release_all_trace_kprobes();
Thomas Gleixner30e7d8942017-05-17 10:19:49 +02001719 /*
1720 * Wait for the optimizer work to finish. Otherwise it might fiddle
1721 * with probes in already freed __init text.
1722 */
1723 wait_for_kprobe_optimizer();
Masami Hiramatsu231e36f2010-01-14 00:12:12 -05001724 if (warn)
1725		pr_cont("NG: Some tests failed. Please check them.\n");
1726 else
1727 pr_cont("OK\n");
Masami Hiramatsu413d37d2009-08-13 16:35:11 -04001728 return 0;
1729}
1730
1731late_initcall(kprobe_trace_self_tests_init);
1732
1733#endif