blob: 7660a7846586ccfc75f5b3b38d460e8d01ba4b25 [file] [log] [blame]
Alexei Starovoitovc4f66992018-03-28 12:05:37 -07001/* SPDX-License-Identifier: GPL-2.0 */
2
3#undef TRACE_SYSTEM_VAR
4
5#ifdef CONFIG_BPF_EVENTS
6
#undef __entry
#define __entry entry

/*
 * Dynamic-array fields store a 32-bit __data_loc_<field> word:
 * low 16 bits = byte offset of the payload from the start of the entry,
 * high 16 bits = payload length.
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

/* A dynamic string is just a dynamic array viewed as char *. */
#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
/*
 * Relative dynamic arrays encode the offset in __rel_loc_<field> relative
 * to the address just past the __rel_loc field itself (low 16 bits),
 * with the length in the high 16 bits.
 */
#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field)	\
		((void *)(&__entry->__rel_loc_##field) +	\
		 sizeof(__entry->__rel_loc_##field) +		\
		 (__entry->__rel_loc_##field & 0xffff))

#undef __get_rel_dynamic_array_len
#define __get_rel_dynamic_array_len(field)	\
		((__entry->__rel_loc_##field >> 16) & 0xffff)

#undef __get_rel_str
#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))

#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
/*
 * The perf-specific count/task modifiers have no effect for BPF probes;
 * both are plain pass-throughs here.
 */
#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)
/* cast any integer, pointer, or small struct to u64 */

/*
 * UINTTYPE(size) yields the unsigned integer type with the given byte
 * width (u8/u16/u32/u64); any other size resolves to void and fails to
 * compile at the use site.
 */
#define UINTTYPE(size) \
	__typeof__(__builtin_choose_expr(size == 1,  (u8)1, \
		   __builtin_choose_expr(size == 2, (u16)2, \
		   __builtin_choose_expr(size == 4, (u32)3, \
		   __builtin_choose_expr(size == 8, (u64)4, \
					 (void)5)))))
/*
 * Bit-preserving widening to u64: copy the raw bytes into a same-sized
 * unsigned integer (avoids strict-aliasing issues), then zero-extend.
 */
#define __CAST_TO_U64(x) ({ \
	typeof(x) __src = (x); \
	UINTTYPE(sizeof(x)) __dst; \
	memcpy(&__dst, &__src, sizeof(__dst)); \
	(u64)__dst; })

/* __CASTn casts the first n arguments, recursing down to __CAST1. */
#define __CAST1(a,...) __CAST_TO_U64(a)
#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
/*
 * Emit the BPF handler for one tracepoint class: __data carries the
 * attached bpf_prog, every tracepoint argument is widened to u64 via
 * CAST_TO_U64(), and the whole set is handed to bpf_trace_run<N>().
 */
#define __BPF_DECLARE_TRACE(call, proto, args)				\
static notrace void							\
__bpf_trace_##call(void *__data, proto)					\
{									\
	struct bpf_prog *prog = __data;					\
	CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args));	\
}
/* An event class maps directly onto one BPF trace thunk. */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * bpf probe will fail to compile unless it too is updated.
 */
#define __DEFINE_EVENT(template, call, proto, args, size)		\
static inline void bpf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(__bpf_trace_##template);	\
}									\
/* Named handler type so BTF records the exact prototype. */		\
typedef void (*btf_trace_##call)(void *__data, proto);			\
/* The union ties the map entry and the typed handler together. */	\
static union {								\
	struct bpf_raw_event_map event;					\
	btf_trace_##call handler;					\
} __bpf_trace_tp_map_##call __used					\
__section("__bpf_raw_tp_map") = {					\
	.event = {							\
		.tp = &__tracepoint_##call,				\
		.bpf_func = __bpf_trace_##template,			\
		.num_args = COUNT_ARGS(args),				\
		.writable_size = size,					\
	},								\
};
/* Expand to the first argument only (used to pick the buffer pointer
 * out of a tracepoint's proto/args lists). */
#define FIRST(x, ...) x

/*
 * Verify at build time that a writable tracepoint's declared buffer
 * size matches the size of the object its first argument points to.
 */
#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size)		\
static inline void bpf_test_buffer_##call(void)				\
{									\
	/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
	 * BUILD_BUG_ON_ZERO() uses a different mechanism that is not	\
	 * dead-code-eliminated.					\
	 */								\
	FIRST(proto);							\
	(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args)));		\
}
/*
 * Writable events additionally get the build-time buffer size check;
 * non-writable ones pass a writable_size of 0.
 */
#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size)	\
	__CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)

/* The print format is irrelevant for BPF; reuse DEFINE_EVENT(). */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Bare tracepoints act as their own template. */
#undef DECLARE_TRACE
#define DECLARE_TRACE(call, proto, args)				\
	__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))		\
	__DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)

#undef DECLARE_TRACE_WRITABLE
#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
	__CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
	__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
	__DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)
Alexei Starovoitovc4f66992018-03-28 12:05:37 -0700147#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
Matt Mullins9df1c282019-04-26 11:49:47 -0700148
Hou Tao65223742021-10-04 17:48:55 +0800149#undef DECLARE_TRACE_WRITABLE
Matt Mullins9df1c282019-04-26 11:49:47 -0700150#undef DEFINE_EVENT_WRITABLE
Hou Tao65223742021-10-04 17:48:55 +0800151#undef __CHECK_WRITABLE_BUF_SIZE
Matt Mullins9df1c282019-04-26 11:49:47 -0700152#undef __DEFINE_EVENT
153#undef FIRST
154
Alexei Starovoitovc4f66992018-03-28 12:05:37 -0700155#endif /* CONFIG_BPF_EVENTS */