/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_BPF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
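
/*
 * Worked example (values are illustrative only): __data_loc_<field> packs
 * the field's offset from the start of the entry in its low 16 bits and
 * the field's length in its high 16 bits.  If __entry->__data_loc_name
 * were 0x000a0040, __get_dynamic_array(name) would point 0x40 bytes past
 * __entry and __get_dynamic_array_len(name) would be 0x0a (10 bytes).
 */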

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
	__typeof__(__builtin_choose_expr(size == 1,  (u8)1, \
		   __builtin_choose_expr(size == 2, (u16)2, \
		   __builtin_choose_expr(size == 4, (u32)3, \
		   __builtin_choose_expr(size == 8, (u64)4, \
					 (void)5)))))
#define __CAST_TO_U64(x) ({ \
	typeof(x) __src = (x); \
	UINTTYPE(sizeof(x)) __dst; \
	memcpy(&__dst, &__src, sizeof(__dst)); \
	(u64)__dst; })
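
/*
 * Illustration only: UINTTYPE(sizeof(x)) picks the unsigned integer type
 * of the same size as x, the bytes of x are memcpy()'d into it, and the
 * result is widened to u64.  __CAST_TO_U64((u16)0x1234) therefore
 * evaluates to 0x1234ULL, and an 8-byte pointer is passed through
 * bit-for-bit.  Sizes other than 1, 2, 4 or 8 make UINTTYPE() resolve to
 * void, so the declaration of __dst (and hence the build) fails.
 */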

#define __CAST1(a,...) __CAST_TO_U64(a)
#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
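
/*
 * Expansion sketch for a hypothetical three-argument tracepoint:
 *
 *   CAST_TO_U64(a, b, c)
 *     -> CONCATENATE(__CAST, 3)(a, b, c)
 *     -> __CAST3(a, b, c)
 *     -> __CAST_TO_U64(a), __CAST2(b, c)
 *     -> __CAST_TO_U64(a), __CAST_TO_U64(b), __CAST_TO_U64(c)
 *
 * With more than 12 arguments there is no matching __CAST<n>() macro,
 * which is the build error the comment above refers to.
 */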

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
__bpf_trace_##call(void *__data, proto)					\
{									\
	struct bpf_prog *prog = __data;					\
	CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args));	\
}
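
/*
 * Sketch of the generated handler for a hypothetical event class (the
 * name and prototype below are made up for illustration):
 *
 *   DECLARE_EVENT_CLASS(sample_class,
 *	TP_PROTO(struct sk_buff *skb, int len),
 *	TP_ARGS(skb, len), ...)
 *
 * expands to roughly
 *
 *   static notrace void
 *   __bpf_trace_sample_class(void *__data, struct sk_buff *skb, int len)
 *   {
 *	struct bpf_prog *prog = __data;
 *	bpf_trace_run2(prog, __CAST_TO_U64(skb), __CAST_TO_U64(len));
 *   }
 *
 * where bpf_trace_run1() ... bpf_trace_run12() are the helpers that
 * actually run the attached BPF program.
 */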

/*
 * The bpf_test_probe_##call() below is never called; it exists only as a
 * build-time check so that if the tracepoint callback prototype changes,
 * the BPF probe fails to compile unless it is updated as well.  The
 * __bpf_trace_tp_map_##call entry, by contrast, is real data placed in
 * the "__bpf_raw_tp_map" section and used to look up raw tracepoints at
 * attach time.
 */
#define __DEFINE_EVENT(template, call, proto, args, size)		\
static inline void bpf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(__bpf_trace_##template);	\
}									\
typedef void (*btf_trace_##call)(void *__data, proto);			\
static union {								\
	struct bpf_raw_event_map event;					\
	btf_trace_##call handler;					\
} __bpf_trace_tp_map_##call __used					\
__section("__bpf_raw_tp_map") = {					\
	.event = {							\
		.tp		= &__tracepoint_##call,			\
		.bpf_func	= __bpf_trace_##template,		\
		.num_args	= COUNT_ARGS(args),			\
		.writable_size	= size,					\
	},								\
};
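
/*
 * Why the union (sketch, continuing the made-up names used above):
 * overlaying a btf_trace_<name> function pointer on the bpf_raw_event_map
 * entry keeps the btf_trace_<name> typedef alive in the kernel's BTF, so
 * the verifier and libbpf can recover the tracepoint's argument types for
 * BTF-aware programs (e.g. tp_btf), while bpf_get_raw_tracepoint() scans
 * the "__bpf_raw_tp_map" section for .tp and .bpf_func when a raw
 * tracepoint program is attached.
 */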

#define FIRST(x, ...) x

#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size)	\
static inline void bpf_test_buffer_##call(void)			\
{									\
	/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but	\
	 * BUILD_BUG_ON_ZERO() uses a different mechanism that is not		\
	 * dead-code-eliminated.						\
	 */									\
	FIRST(proto);							\
	(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args)));		\
}									\
__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
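
/*
 * Expansion sketch of the size check, again with made-up names.  For
 *
 *   DEFINE_EVENT_WRITABLE(sample_class, sample_event,
 *	TP_PROTO(struct sample_req *req, int len),
 *	TP_ARGS(req, len), sizeof(struct sample_req))
 *
 * the body of bpf_test_buffer_sample_event() becomes roughly
 *
 *   struct sample_req *req;                         <- FIRST(proto)
 *   (void)BUILD_BUG_ON_ZERO(sizeof(struct sample_req) != sizeof(*req));
 *
 * i.e. the writable size handed to __DEFINE_EVENT() must match the object
 * that the first tracepoint argument points to, which is the buffer a
 * program attached to this writable tracepoint may modify.
 */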

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
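
/*
 * The re-include below is the usual define_trace.h trick: the subsystem's
 * trace header is pulled in once more with the macros defined above in
 * effect, so every event it declares also generates a __bpf_trace_<name>()
 * handler and a "__bpf_raw_tp_map" entry.  As an illustration only, if
 * TRACE_INCLUDE_FILE were "sched" this would re-include
 * <trace/events/sched.h>.
 */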

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DEFINE_EVENT_WRITABLE
#undef __DEFINE_EVENT
#undef FIRST

#endif /* CONFIG_BPF_EVENTS */