Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Alexei Starovoitov | 249b812 | 2014-12-01 15:06:37 -0800 | [diff] [blame] | 2 | #ifndef __BPF_HELPERS_H |
| 3 | #define __BPF_HELPERS_H |
| 4 | |
| 5 | /* helper macro to place programs, maps, license in |
| 6 | * different sections in elf_bpf file. Section names |
| 7 | * are interpreted by elf_bpf loader |
| 8 | */ |
| 9 | #define SEC(NAME) __attribute__((section(NAME), used)) |
| 10 | |
| 11 | /* helper functions called from eBPF programs written in C */ |
| 12 | static void *(*bpf_map_lookup_elem)(void *map, void *key) = |
| 13 | (void *) BPF_FUNC_map_lookup_elem; |
| 14 | static int (*bpf_map_update_elem)(void *map, void *key, void *value, |
| 15 | unsigned long long flags) = |
| 16 | (void *) BPF_FUNC_map_update_elem; |
| 17 | static int (*bpf_map_delete_elem)(void *map, void *key) = |
| 18 | (void *) BPF_FUNC_map_delete_elem; |
Mauricio Vasquez B | 43b987d | 2018-10-18 15:16:41 +0200 | [diff] [blame] | 19 | static int (*bpf_map_push_elem)(void *map, void *value, |
| 20 | unsigned long long flags) = |
| 21 | (void *) BPF_FUNC_map_push_elem; |
| 22 | static int (*bpf_map_pop_elem)(void *map, void *value) = |
| 23 | (void *) BPF_FUNC_map_pop_elem; |
| 24 | static int (*bpf_map_peek_elem)(void *map, void *value) = |
| 25 | (void *) BPF_FUNC_map_peek_elem; |
Alexei Starovoitov | b896c4f | 2015-03-25 12:49:23 -0700 | [diff] [blame] | 26 | static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) = |
| 27 | (void *) BPF_FUNC_probe_read; |
| 28 | static unsigned long long (*bpf_ktime_get_ns)(void) = |
| 29 | (void *) BPF_FUNC_ktime_get_ns; |
| 30 | static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) = |
| 31 | (void *) BPF_FUNC_trace_printk; |
Alexei Starovoitov | 5bacd78 | 2015-05-19 16:59:05 -0700 | [diff] [blame] | 32 | static void (*bpf_tail_call)(void *ctx, void *map, int index) = |
| 33 | (void *) BPF_FUNC_tail_call; |
Alexei Starovoitov | 530b2c8 | 2015-05-19 16:59:06 -0700 | [diff] [blame] | 34 | static unsigned long long (*bpf_get_smp_processor_id)(void) = |
| 35 | (void *) BPF_FUNC_get_smp_processor_id; |
Alexei Starovoitov | ffeedaf | 2015-06-12 19:39:12 -0700 | [diff] [blame] | 36 | static unsigned long long (*bpf_get_current_pid_tgid)(void) = |
| 37 | (void *) BPF_FUNC_get_current_pid_tgid; |
| 38 | static unsigned long long (*bpf_get_current_uid_gid)(void) = |
| 39 | (void *) BPF_FUNC_get_current_uid_gid; |
| 40 | static int (*bpf_get_current_comm)(void *buf, int buf_size) = |
| 41 | (void *) BPF_FUNC_get_current_comm; |
Teng Qin | 41e9a80 | 2017-06-02 21:03:53 -0700 | [diff] [blame] | 42 | static unsigned long long (*bpf_perf_event_read)(void *map, |
| 43 | unsigned long long flags) = |
Kaixu Xia | 47efb30 | 2015-08-06 07:02:36 +0000 | [diff] [blame] | 44 | (void *) BPF_FUNC_perf_event_read; |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 45 | static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) = |
| 46 | (void *) BPF_FUNC_clone_redirect; |
| 47 | static int (*bpf_redirect)(int ifindex, int flags) = |
| 48 | (void *) BPF_FUNC_redirect; |
John Fastabend | 9d6e005 | 2017-07-17 09:30:25 -0700 | [diff] [blame] | 49 | static int (*bpf_redirect_map)(void *map, int key, int flags) = |
| 50 | (void *) BPF_FUNC_redirect_map; |
Adam Barth | 05b8ad2 | 2016-08-10 09:45:39 -0700 | [diff] [blame] | 51 | static int (*bpf_perf_event_output)(void *ctx, void *map, |
| 52 | unsigned long long flags, void *data, |
| 53 | int size) = |
Alexei Starovoitov | 3911169 | 2015-10-20 20:02:35 -0700 | [diff] [blame] | 54 | (void *) BPF_FUNC_perf_event_output; |
Alexei Starovoitov | a6ffe7b | 2016-02-17 19:58:59 -0800 | [diff] [blame] | 55 | static int (*bpf_get_stackid)(void *ctx, void *map, int flags) = |
| 56 | (void *) BPF_FUNC_get_stackid; |
Sargun Dhillon | 96ae522 | 2016-07-25 05:54:46 -0700 | [diff] [blame] | 57 | static int (*bpf_probe_write_user)(void *dst, void *src, int size) = |
| 58 | (void *) BPF_FUNC_probe_write_user; |
Sargun Dhillon | 9e6e60e | 2016-08-12 08:57:04 -0700 | [diff] [blame] | 59 | static int (*bpf_current_task_under_cgroup)(void *map, int index) = |
| 60 | (void *) BPF_FUNC_current_task_under_cgroup; |
William Tu | 6afb1e2 | 2016-08-19 11:55:44 -0700 | [diff] [blame] | 61 | static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) = |
| 62 | (void *) BPF_FUNC_skb_get_tunnel_key; |
| 63 | static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) = |
| 64 | (void *) BPF_FUNC_skb_set_tunnel_key; |
| 65 | static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) = |
| 66 | (void *) BPF_FUNC_skb_get_tunnel_opt; |
| 67 | static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) = |
| 68 | (void *) BPF_FUNC_skb_set_tunnel_opt; |
Alexei Starovoitov | 1c47910e | 2016-09-01 18:37:25 -0700 | [diff] [blame] | 69 | static unsigned long long (*bpf_get_prandom_u32)(void) = |
| 70 | (void *) BPF_FUNC_get_prandom_u32; |
Martin KaFai Lau | 12d8bb6 | 2016-12-07 15:53:14 -0800 | [diff] [blame] | 71 | static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = |
| 72 | (void *) BPF_FUNC_xdp_adjust_head; |
Daniel Borkmann | 22c8852 | 2017-09-25 02:25:53 +0200 | [diff] [blame] | 73 | static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) = |
| 74 | (void *) BPF_FUNC_xdp_adjust_meta; |
Andrey Ignatov | 0289a2c | 2018-07-30 17:42:30 -0700 | [diff] [blame] | 75 | static int (*bpf_get_socket_cookie)(void *ctx) = |
| 76 | (void *) BPF_FUNC_get_socket_cookie; |
Lawrence Brakmo | 8c4b4c7 | 2017-06-30 20:02:46 -0700 | [diff] [blame] | 77 | static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, |
| 78 | int optlen) = |
| 79 | (void *) BPF_FUNC_setsockopt; |
Lawrence Brakmo | cd86d1f | 2017-10-20 11:05:40 -0700 | [diff] [blame] | 80 | static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, |
| 81 | int optlen) = |
| 82 | (void *) BPF_FUNC_getsockopt; |
Lawrence Brakmo | d6d4f60 | 2018-01-25 16:14:16 -0800 | [diff] [blame] | 83 | static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) = |
| 84 | (void *) BPF_FUNC_sock_ops_cb_flags_set; |
John Fastabend | 34f79502 | 2017-10-18 07:10:36 -0700 | [diff] [blame] | 85 | static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = |
John Fastabend | 69e8cc1 | 2017-08-15 22:33:32 -0700 | [diff] [blame] | 86 | (void *) BPF_FUNC_sk_redirect_map; |
John Fastabend | b8b394f | 2018-05-14 10:00:18 -0700 | [diff] [blame] | 87 | static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) = |
| 88 | (void *) BPF_FUNC_sk_redirect_hash; |
John Fastabend | 69e8cc1 | 2017-08-15 22:33:32 -0700 | [diff] [blame] | 89 | static int (*bpf_sock_map_update)(void *map, void *key, void *value, |
John Fastabend | 464bc0f | 2017-08-28 07:10:04 -0700 | [diff] [blame] | 90 | unsigned long long flags) = |
John Fastabend | 69e8cc1 | 2017-08-15 22:33:32 -0700 | [diff] [blame] | 91 | (void *) BPF_FUNC_sock_map_update; |
John Fastabend | b8b394f | 2018-05-14 10:00:18 -0700 | [diff] [blame] | 92 | static int (*bpf_sock_hash_update)(void *map, void *key, void *value, |
| 93 | unsigned long long flags) = |
| 94 | (void *) BPF_FUNC_sock_hash_update; |
Yonghong Song | 020a32d | 2017-10-05 09:19:21 -0700 | [diff] [blame] | 95 | static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags, |
| 96 | void *buf, unsigned int buf_size) = |
| 97 | (void *) BPF_FUNC_perf_event_read_value; |
Yonghong Song | 81b9cf8 | 2017-10-05 09:19:23 -0700 | [diff] [blame] | 98 | static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, |
| 99 | unsigned int buf_size) = |
| 100 | (void *) BPF_FUNC_perf_prog_read_value; |
Josef Bacik | 965de87 | 2017-12-11 11:36:49 -0500 | [diff] [blame] | 101 | static int (*bpf_override_return)(void *ctx, unsigned long rc) = |
| 102 | (void *) BPF_FUNC_override_return; |
John Fastabend | 4c4c3c2 | 2018-03-18 12:57:41 -0700 | [diff] [blame] | 103 | static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) = |
| 104 | (void *) BPF_FUNC_msg_redirect_map; |
John Fastabend | b8b394f | 2018-05-14 10:00:18 -0700 | [diff] [blame] | 105 | static int (*bpf_msg_redirect_hash)(void *ctx, |
| 106 | void *map, void *key, int flags) = |
| 107 | (void *) BPF_FUNC_msg_redirect_hash; |
John Fastabend | 1c16c31 | 2018-03-18 12:57:56 -0700 | [diff] [blame] | 108 | static int (*bpf_msg_apply_bytes)(void *ctx, int len) = |
| 109 | (void *) BPF_FUNC_msg_apply_bytes; |
John Fastabend | 468b3fd | 2018-03-18 12:58:02 -0700 | [diff] [blame] | 110 | static int (*bpf_msg_cork_bytes)(void *ctx, int len) = |
| 111 | (void *) BPF_FUNC_msg_cork_bytes; |
John Fastabend | 0dcbbf6 | 2018-03-18 12:58:12 -0700 | [diff] [blame] | 112 | static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) = |
| 113 | (void *) BPF_FUNC_msg_pull_data; |
John Fastabend | f908d26 | 2018-10-19 19:56:50 -0700 | [diff] [blame] | 114 | static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) = |
| 115 | (void *) BPF_FUNC_msg_push_data; |
John Fastabend | d913a22 | 2018-11-26 14:16:18 -0800 | [diff] [blame] | 116 | static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) = |
| 117 | (void *) BPF_FUNC_msg_pop_data; |
Andrey Ignatov | 622adaf | 2018-03-30 15:08:06 -0700 | [diff] [blame] | 118 | static int (*bpf_bind)(void *ctx, void *addr, int addr_len) = |
| 119 | (void *) BPF_FUNC_bind; |
Nikita V. Shirokov | 0367d0a | 2018-04-17 21:42:22 -0700 | [diff] [blame] | 120 | static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) = |
| 121 | (void *) BPF_FUNC_xdp_adjust_tail; |
Eyal Birger | 29a36f9 | 2018-04-24 17:50:30 +0300 | [diff] [blame] | 122 | static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state, |
| 123 | int size, int flags) = |
| 124 | (void *) BPF_FUNC_skb_get_xfrm_state; |
Martin KaFai Lau | 91134d8 | 2018-08-08 01:01:31 -0700 | [diff] [blame] | 125 | static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) = |
| 126 | (void *) BPF_FUNC_sk_select_reuseport; |
Yonghong Song | de2ff05 | 2018-04-28 22:28:12 -0700 | [diff] [blame] | 127 | static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) = |
| 128 | (void *) BPF_FUNC_get_stack; |
David Ahern | fe61605 | 2018-05-09 20:34:27 -0700 | [diff] [blame] | 129 | static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, |
| 130 | int plen, __u32 flags) = |
| 131 | (void *) BPF_FUNC_fib_lookup; |
Mathieu Xhonneux | c99a84e | 2018-05-20 14:58:17 +0100 | [diff] [blame] | 132 | static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr, |
| 133 | unsigned int len) = |
| 134 | (void *) BPF_FUNC_lwt_push_encap; |
| 135 | static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset, |
| 136 | void *from, unsigned int len) = |
| 137 | (void *) BPF_FUNC_lwt_seg6_store_bytes; |
| 138 | static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param, |
| 139 | unsigned int param_len) = |
| 140 | (void *) BPF_FUNC_lwt_seg6_action; |
| 141 | static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset, |
| 142 | unsigned int len) = |
| 143 | (void *) BPF_FUNC_lwt_seg6_adjust_srh; |
Sean Young | 6bdd533 | 2018-05-27 12:24:10 +0100 | [diff] [blame] | 144 | static int (*bpf_rc_repeat)(void *ctx) = |
| 145 | (void *) BPF_FUNC_rc_repeat; |
| 146 | static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol, |
| 147 | unsigned long long scancode, unsigned int toggle) = |
| 148 | (void *) BPF_FUNC_rc_keydown; |
Yonghong Song | c7ddbba | 2018-06-03 15:59:42 -0700 | [diff] [blame] | 149 | static unsigned long long (*bpf_get_current_cgroup_id)(void) = |
| 150 | (void *) BPF_FUNC_get_current_cgroup_id; |
Roman Gushchin | d4c9f57 | 2018-08-02 14:27:28 -0700 | [diff] [blame] | 151 | static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) = |
| 152 | (void *) BPF_FUNC_get_local_storage; |
Andrey Ignatov | 02f6ac7 | 2018-08-12 10:49:29 -0700 | [diff] [blame] | 153 | static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = |
| 154 | (void *) BPF_FUNC_skb_cgroup_id; |
| 155 | static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = |
| 156 | (void *) BPF_FUNC_skb_ancestor_cgroup_id; |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 157 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, |
| 158 | struct bpf_sock_tuple *tuple, |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 159 | int size, unsigned long long netns_id, |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 160 | unsigned long long flags) = |
| 161 | (void *) BPF_FUNC_sk_lookup_tcp; |
| 162 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, |
| 163 | struct bpf_sock_tuple *tuple, |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 164 | int size, unsigned long long netns_id, |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 165 | unsigned long long flags) = |
| 166 | (void *) BPF_FUNC_sk_lookup_udp; |
| 167 | static int (*bpf_sk_release)(struct bpf_sock *sk) = |
| 168 | (void *) BPF_FUNC_sk_release; |
Jesper Dangaard Brouer | 4f77544 | 2018-10-09 12:04:48 +0200 | [diff] [blame] | 169 | static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) = |
| 170 | (void *) BPF_FUNC_skb_vlan_push; |
| 171 | static int (*bpf_skb_vlan_pop)(void *ctx) = |
| 172 | (void *) BPF_FUNC_skb_vlan_pop; |
Sean Young | 01d3240 | 2018-12-06 13:01:03 +0000 | [diff] [blame] | 173 | static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) = |
| 174 | (void *) BPF_FUNC_rc_pointer_rel; |
Alexei Starovoitov | ab963be | 2019-01-31 15:40:08 -0800 | [diff] [blame] | 175 | static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) = |
| 176 | (void *) BPF_FUNC_spin_lock; |
| 177 | static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = |
| 178 | (void *) BPF_FUNC_spin_unlock; |
Martin KaFai Lau | e0b27b3 | 2019-02-09 23:22:28 -0800 | [diff] [blame] | 179 | static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = |
| 180 | (void *) BPF_FUNC_sk_fullsock; |
| 181 | static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = |
| 182 | (void *) BPF_FUNC_tcp_sock; |
brakmo | 5cce85c | 2019-03-01 12:38:47 -0800 | [diff] [blame] | 183 | static int (*bpf_skb_ecn_set_ce)(void *ctx) = |
| 184 | (void *) BPF_FUNC_skb_ecn_set_ce; |
Alexei Starovoitov | 249b812 | 2014-12-01 15:06:37 -0800 | [diff] [blame] | 185 | |
| 186 | /* llvm builtin functions that eBPF C program may use to |
| 187 | * emit BPF_LD_ABS and BPF_LD_IND instructions |
| 188 | */ |
| 189 | struct sk_buff; |
| 190 | unsigned long long load_byte(void *skb, |
| 191 | unsigned long long off) asm("llvm.bpf.load.byte"); |
| 192 | unsigned long long load_half(void *skb, |
| 193 | unsigned long long off) asm("llvm.bpf.load.half"); |
| 194 | unsigned long long load_word(void *skb, |
| 195 | unsigned long long off) asm("llvm.bpf.load.word"); |
| 196 | |
/* a helper structure used by eBPF C program
 * to describe map attributes to elf_bpf loader.
 * All fields are plain unsigned ints so the loader can read the
 * definition straight out of the ELF "maps" section.
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
	unsigned int inner_map_idx;	/* index of inner map for map-in-map types — verify against loader */
	unsigned int numa_node;
};
| 209 | |
/* Emit a dummy key/value struct for map "name" into a dedicated
 * ".maps.<name>" section so its key/value types are recorded in the
 * object file (used for BTF type information — confirm against loader).
 */
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)		\
	struct ____btf_map_##name {				\
		type_key key;					\
		type_val value;					\
	};							\
	struct ____btf_map_##name				\
	__attribute__ ((section(".maps." #name), used))		\
		____btf_map_##name = { }
| 218 | |
Thomas Graf | f74599f | 2016-11-30 17:10:11 +0100 | [diff] [blame] | 219 | static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = |
| 220 | (void *) BPF_FUNC_skb_load_bytes; |
Martin KaFai Lau | 91134d8 | 2018-08-08 01:01:31 -0700 | [diff] [blame] | 221 | static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) = |
| 222 | (void *) BPF_FUNC_skb_load_bytes_relative; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 223 | static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = |
| 224 | (void *) BPF_FUNC_skb_store_bytes; |
| 225 | static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) = |
| 226 | (void *) BPF_FUNC_l3_csum_replace; |
| 227 | static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = |
| 228 | (void *) BPF_FUNC_l4_csum_replace; |
Nikita V. Shirokov | c6ffd1f | 2018-04-17 21:42:23 -0700 | [diff] [blame] | 229 | static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) = |
| 230 | (void *) BPF_FUNC_csum_diff; |
Daniel Borkmann | 747ea55 | 2016-08-12 22:17:17 +0200 | [diff] [blame] | 231 | static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = |
| 232 | (void *) BPF_FUNC_skb_under_cgroup; |
Thomas Graf | f74599f | 2016-11-30 17:10:11 +0100 | [diff] [blame] | 233 | static int (*bpf_skb_change_head)(void *, int len, int flags) = |
| 234 | (void *) BPF_FUNC_skb_change_head; |
John Fastabend | 82a8616 | 2018-03-18 12:57:31 -0700 | [diff] [blame] | 235 | static int (*bpf_skb_pull_data)(void *, int len) = |
| 236 | (void *) BPF_FUNC_skb_pull_data; |
Willem de Bruijn | f2bb538 | 2019-02-27 11:08:06 -0500 | [diff] [blame] | 237 | static unsigned int (*bpf_get_cgroup_classid)(void *ctx) = |
| 238 | (void *) BPF_FUNC_get_cgroup_classid; |
| 239 | static unsigned int (*bpf_get_route_realm)(void *ctx) = |
| 240 | (void *) BPF_FUNC_get_route_realm; |
| 241 | static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) = |
| 242 | (void *) BPF_FUNC_skb_change_proto; |
| 243 | static int (*bpf_skb_change_type)(void *ctx, __u32 type) = |
| 244 | (void *) BPF_FUNC_skb_change_type; |
| 245 | static unsigned int (*bpf_get_hash_recalc)(void *ctx) = |
| 246 | (void *) BPF_FUNC_get_hash_recalc; |
| 247 | static unsigned long long (*bpf_get_current_task)(void *ctx) = |
| 248 | (void *) BPF_FUNC_get_current_task; |
| 249 | static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) = |
| 250 | (void *) BPF_FUNC_skb_change_tail; |
| 251 | static long long (*bpf_csum_update)(void *ctx, __u32 csum) = |
| 252 | (void *) BPF_FUNC_csum_update; |
| 253 | static void (*bpf_set_hash_invalid)(void *ctx) = |
| 254 | (void *) BPF_FUNC_set_hash_invalid; |
| 255 | static int (*bpf_get_numa_node_id)(void) = |
| 256 | (void *) BPF_FUNC_get_numa_node_id; |
| 257 | static int (*bpf_probe_read_str)(void *ctx, __u32 size, |
| 258 | const void *unsafe_ptr) = |
| 259 | (void *) BPF_FUNC_probe_read_str; |
| 260 | static unsigned int (*bpf_get_socket_uid)(void *ctx) = |
| 261 | (void *) BPF_FUNC_get_socket_uid; |
| 262 | static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) = |
| 263 | (void *) BPF_FUNC_set_hash; |
| 264 | static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode, |
| 265 | unsigned long long flags) = |
| 266 | (void *) BPF_FUNC_skb_adjust_room; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 267 | |
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390x) || defined(__TARGET_ARCH_s930x)
/*
 * "s930x" is a historical typo for "s390x"; accept both spellings for
 * backward compatibility, but define bpf_target_s390x — that is the
 * macro the PT_REGS_* selection below actually tests, so defining the
 * typo'd name would silently disable the s390x register accessors.
 */
#define bpf_target_s390x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s390x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__sparc__)
#define bpf_target_sparc
#endif
#endif
| 307 | |
/* Per-architecture accessors for kprobe arguments: map the generic
 * PT_REGS_* names onto the fields of each arch's struct pt_regs.
 */
#if defined(bpf_target_x86)

#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)

#elif defined(bpf_target_s390x)

#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)

#elif defined(bpf_target_arm64)

#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_PARM3(x) ((x)->regs[2])
#define PT_REGS_PARM4(x) ((x)->regs[3])
#define PT_REGS_PARM5(x) ((x)->regs[4])
#define PT_REGS_RET(x) ((x)->regs[30])
#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[0])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)

#elif defined(bpf_target_mips)

#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)

#elif defined(bpf_target_powerpc)

#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)

#elif defined(bpf_target_sparc)

#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])

/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif

#endif
Naveen N. Rao | 138d615 | 2016-04-04 22:31:34 +0530 | [diff] [blame] | 390 | |
Joel Fernandes | b655fc1 | 2017-09-20 09:11:58 -0700 | [diff] [blame] | 391 | #ifdef bpf_target_powerpc |
Naveen N. Rao | 138d615 | 2016-04-04 22:31:34 +0530 | [diff] [blame] | 392 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) |
| 393 | #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP |
Joel Fernandes | b655fc1 | 2017-09-20 09:11:58 -0700 | [diff] [blame] | 394 | #elif bpf_target_sparc |
David S. Miller | b0c4780 | 2017-04-22 12:31:05 -0700 | [diff] [blame] | 395 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) |
| 396 | #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP |
Naveen N. Rao | 138d615 | 2016-04-04 22:31:34 +0530 | [diff] [blame] | 397 | #else |
| 398 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \ |
| 399 | bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) |
| 400 | #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \ |
| 401 | bpf_probe_read(&(ip), sizeof(ip), \ |
| 402 | (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) |
| 403 | #endif |
| 404 | |
Alexei Starovoitov | 249b812 | 2014-12-01 15:06:37 -0800 | [diff] [blame] | 405 | #endif |