Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Alexei Starovoitov | 249b812 | 2014-12-01 15:06:37 -0800 | [diff] [blame] | 2 | #ifndef __BPF_HELPERS_H |
| 3 | #define __BPF_HELPERS_H |
| 4 | |
| 5 | /* helper macro to place programs, maps, license in |
| 6 | * different sections in elf_bpf file. Section names |
| 7 | * are interpreted by elf_bpf loader |
| 8 | */ |
| 9 | #define SEC(NAME) __attribute__((section(NAME), used)) |
| 10 | |
| 11 | /* helper functions called from eBPF programs written in C */ |
| 12 | static void *(*bpf_map_lookup_elem)(void *map, void *key) = |
| 13 | (void *) BPF_FUNC_map_lookup_elem; |
| 14 | static int (*bpf_map_update_elem)(void *map, void *key, void *value, |
| 15 | unsigned long long flags) = |
| 16 | (void *) BPF_FUNC_map_update_elem; |
| 17 | static int (*bpf_map_delete_elem)(void *map, void *key) = |
| 18 | (void *) BPF_FUNC_map_delete_elem; |
Alexei Starovoitov | b896c4f | 2015-03-25 12:49:23 -0700 | [diff] [blame] | 19 | static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) = |
| 20 | (void *) BPF_FUNC_probe_read; |
| 21 | static unsigned long long (*bpf_ktime_get_ns)(void) = |
| 22 | (void *) BPF_FUNC_ktime_get_ns; |
| 23 | static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) = |
| 24 | (void *) BPF_FUNC_trace_printk; |
Alexei Starovoitov | 5bacd78 | 2015-05-19 16:59:05 -0700 | [diff] [blame] | 25 | static void (*bpf_tail_call)(void *ctx, void *map, int index) = |
| 26 | (void *) BPF_FUNC_tail_call; |
Alexei Starovoitov | 530b2c8 | 2015-05-19 16:59:06 -0700 | [diff] [blame] | 27 | static unsigned long long (*bpf_get_smp_processor_id)(void) = |
| 28 | (void *) BPF_FUNC_get_smp_processor_id; |
Alexei Starovoitov | ffeedaf | 2015-06-12 19:39:12 -0700 | [diff] [blame] | 29 | static unsigned long long (*bpf_get_current_pid_tgid)(void) = |
| 30 | (void *) BPF_FUNC_get_current_pid_tgid; |
| 31 | static unsigned long long (*bpf_get_current_uid_gid)(void) = |
| 32 | (void *) BPF_FUNC_get_current_uid_gid; |
| 33 | static int (*bpf_get_current_comm)(void *buf, int buf_size) = |
| 34 | (void *) BPF_FUNC_get_current_comm; |
Teng Qin | 41e9a80 | 2017-06-02 21:03:53 -0700 | [diff] [blame] | 35 | static unsigned long long (*bpf_perf_event_read)(void *map, |
| 36 | unsigned long long flags) = |
Kaixu Xia | 47efb30 | 2015-08-06 07:02:36 +0000 | [diff] [blame] | 37 | (void *) BPF_FUNC_perf_event_read; |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 38 | static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) = |
| 39 | (void *) BPF_FUNC_clone_redirect; |
| 40 | static int (*bpf_redirect)(int ifindex, int flags) = |
| 41 | (void *) BPF_FUNC_redirect; |
John Fastabend | 9d6e005 | 2017-07-17 09:30:25 -0700 | [diff] [blame] | 42 | static int (*bpf_redirect_map)(void *map, int key, int flags) = |
| 43 | (void *) BPF_FUNC_redirect_map; |
Adam Barth | 05b8ad2 | 2016-08-10 09:45:39 -0700 | [diff] [blame] | 44 | static int (*bpf_perf_event_output)(void *ctx, void *map, |
| 45 | unsigned long long flags, void *data, |
| 46 | int size) = |
Alexei Starovoitov | 3911169 | 2015-10-20 20:02:35 -0700 | [diff] [blame] | 47 | (void *) BPF_FUNC_perf_event_output; |
Alexei Starovoitov | a6ffe7b | 2016-02-17 19:58:59 -0800 | [diff] [blame] | 48 | static int (*bpf_get_stackid)(void *ctx, void *map, int flags) = |
| 49 | (void *) BPF_FUNC_get_stackid; |
Sargun Dhillon | 96ae522 | 2016-07-25 05:54:46 -0700 | [diff] [blame] | 50 | static int (*bpf_probe_write_user)(void *dst, void *src, int size) = |
| 51 | (void *) BPF_FUNC_probe_write_user; |
Sargun Dhillon | 9e6e60e | 2016-08-12 08:57:04 -0700 | [diff] [blame] | 52 | static int (*bpf_current_task_under_cgroup)(void *map, int index) = |
| 53 | (void *) BPF_FUNC_current_task_under_cgroup; |
William Tu | 6afb1e2 | 2016-08-19 11:55:44 -0700 | [diff] [blame] | 54 | static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) = |
| 55 | (void *) BPF_FUNC_skb_get_tunnel_key; |
| 56 | static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) = |
| 57 | (void *) BPF_FUNC_skb_set_tunnel_key; |
| 58 | static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) = |
| 59 | (void *) BPF_FUNC_skb_get_tunnel_opt; |
| 60 | static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) = |
| 61 | (void *) BPF_FUNC_skb_set_tunnel_opt; |
Alexei Starovoitov | 1c47910e | 2016-09-01 18:37:25 -0700 | [diff] [blame] | 62 | static unsigned long long (*bpf_get_prandom_u32)(void) = |
| 63 | (void *) BPF_FUNC_get_prandom_u32; |
Martin KaFai Lau | 12d8bb6 | 2016-12-07 15:53:14 -0800 | [diff] [blame] | 64 | static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = |
| 65 | (void *) BPF_FUNC_xdp_adjust_head; |
Daniel Borkmann | 22c8852 | 2017-09-25 02:25:53 +0200 | [diff] [blame] | 66 | static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) = |
| 67 | (void *) BPF_FUNC_xdp_adjust_meta; |
Andrey Ignatov | 0289a2c | 2018-07-30 17:42:30 -0700 | [diff] [blame] | 68 | static int (*bpf_get_socket_cookie)(void *ctx) = |
| 69 | (void *) BPF_FUNC_get_socket_cookie; |
Lawrence Brakmo | 8c4b4c7 | 2017-06-30 20:02:46 -0700 | [diff] [blame] | 70 | static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, |
| 71 | int optlen) = |
| 72 | (void *) BPF_FUNC_setsockopt; |
Lawrence Brakmo | cd86d1f | 2017-10-20 11:05:40 -0700 | [diff] [blame] | 73 | static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, |
| 74 | int optlen) = |
| 75 | (void *) BPF_FUNC_getsockopt; |
Lawrence Brakmo | d6d4f60 | 2018-01-25 16:14:16 -0800 | [diff] [blame] | 76 | static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) = |
| 77 | (void *) BPF_FUNC_sock_ops_cb_flags_set; |
John Fastabend | 34f79502 | 2017-10-18 07:10:36 -0700 | [diff] [blame] | 78 | static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = |
John Fastabend | 69e8cc1 | 2017-08-15 22:33:32 -0700 | [diff] [blame] | 79 | (void *) BPF_FUNC_sk_redirect_map; |
John Fastabend | b8b394f | 2018-05-14 10:00:18 -0700 | [diff] [blame] | 80 | static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) = |
| 81 | (void *) BPF_FUNC_sk_redirect_hash; |
John Fastabend | 69e8cc1 | 2017-08-15 22:33:32 -0700 | [diff] [blame] | 82 | static int (*bpf_sock_map_update)(void *map, void *key, void *value, |
John Fastabend | 464bc0f | 2017-08-28 07:10:04 -0700 | [diff] [blame] | 83 | unsigned long long flags) = |
John Fastabend | 69e8cc1 | 2017-08-15 22:33:32 -0700 | [diff] [blame] | 84 | (void *) BPF_FUNC_sock_map_update; |
John Fastabend | b8b394f | 2018-05-14 10:00:18 -0700 | [diff] [blame] | 85 | static int (*bpf_sock_hash_update)(void *map, void *key, void *value, |
| 86 | unsigned long long flags) = |
| 87 | (void *) BPF_FUNC_sock_hash_update; |
Yonghong Song | 020a32d | 2017-10-05 09:19:21 -0700 | [diff] [blame] | 88 | static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags, |
| 89 | void *buf, unsigned int buf_size) = |
| 90 | (void *) BPF_FUNC_perf_event_read_value; |
Yonghong Song | 81b9cf8 | 2017-10-05 09:19:23 -0700 | [diff] [blame] | 91 | static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, |
| 92 | unsigned int buf_size) = |
| 93 | (void *) BPF_FUNC_perf_prog_read_value; |
Josef Bacik | 965de87 | 2017-12-11 11:36:49 -0500 | [diff] [blame] | 94 | static int (*bpf_override_return)(void *ctx, unsigned long rc) = |
| 95 | (void *) BPF_FUNC_override_return; |
John Fastabend | 4c4c3c2 | 2018-03-18 12:57:41 -0700 | [diff] [blame] | 96 | static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) = |
| 97 | (void *) BPF_FUNC_msg_redirect_map; |
John Fastabend | b8b394f | 2018-05-14 10:00:18 -0700 | [diff] [blame] | 98 | static int (*bpf_msg_redirect_hash)(void *ctx, |
| 99 | void *map, void *key, int flags) = |
| 100 | (void *) BPF_FUNC_msg_redirect_hash; |
John Fastabend | 1c16c31 | 2018-03-18 12:57:56 -0700 | [diff] [blame] | 101 | static int (*bpf_msg_apply_bytes)(void *ctx, int len) = |
| 102 | (void *) BPF_FUNC_msg_apply_bytes; |
John Fastabend | 468b3fd | 2018-03-18 12:58:02 -0700 | [diff] [blame] | 103 | static int (*bpf_msg_cork_bytes)(void *ctx, int len) = |
| 104 | (void *) BPF_FUNC_msg_cork_bytes; |
John Fastabend | 0dcbbf6 | 2018-03-18 12:58:12 -0700 | [diff] [blame] | 105 | static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) = |
| 106 | (void *) BPF_FUNC_msg_pull_data; |
Andrey Ignatov | 622adaf | 2018-03-30 15:08:06 -0700 | [diff] [blame] | 107 | static int (*bpf_bind)(void *ctx, void *addr, int addr_len) = |
| 108 | (void *) BPF_FUNC_bind; |
Nikita V. Shirokov | 0367d0a | 2018-04-17 21:42:22 -0700 | [diff] [blame] | 109 | static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) = |
| 110 | (void *) BPF_FUNC_xdp_adjust_tail; |
Eyal Birger | 29a36f9 | 2018-04-24 17:50:30 +0300 | [diff] [blame] | 111 | static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state, |
| 112 | int size, int flags) = |
| 113 | (void *) BPF_FUNC_skb_get_xfrm_state; |
Martin KaFai Lau | 91134d8 | 2018-08-08 01:01:31 -0700 | [diff] [blame] | 114 | static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) = |
| 115 | (void *) BPF_FUNC_sk_select_reuseport; |
Yonghong Song | de2ff05 | 2018-04-28 22:28:12 -0700 | [diff] [blame] | 116 | static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) = |
| 117 | (void *) BPF_FUNC_get_stack; |
David Ahern | fe61605 | 2018-05-09 20:34:27 -0700 | [diff] [blame] | 118 | static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, |
| 119 | int plen, __u32 flags) = |
| 120 | (void *) BPF_FUNC_fib_lookup; |
Mathieu Xhonneux | c99a84e | 2018-05-20 14:58:17 +0100 | [diff] [blame] | 121 | static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr, |
| 122 | unsigned int len) = |
| 123 | (void *) BPF_FUNC_lwt_push_encap; |
| 124 | static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset, |
| 125 | void *from, unsigned int len) = |
| 126 | (void *) BPF_FUNC_lwt_seg6_store_bytes; |
| 127 | static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param, |
| 128 | unsigned int param_len) = |
| 129 | (void *) BPF_FUNC_lwt_seg6_action; |
| 130 | static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset, |
| 131 | unsigned int len) = |
| 132 | (void *) BPF_FUNC_lwt_seg6_adjust_srh; |
Sean Young | 6bdd533 | 2018-05-27 12:24:10 +0100 | [diff] [blame] | 133 | static int (*bpf_rc_repeat)(void *ctx) = |
| 134 | (void *) BPF_FUNC_rc_repeat; |
| 135 | static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol, |
| 136 | unsigned long long scancode, unsigned int toggle) = |
| 137 | (void *) BPF_FUNC_rc_keydown; |
Yonghong Song | c7ddbba | 2018-06-03 15:59:42 -0700 | [diff] [blame] | 138 | static unsigned long long (*bpf_get_current_cgroup_id)(void) = |
| 139 | (void *) BPF_FUNC_get_current_cgroup_id; |
Roman Gushchin | d4c9f57 | 2018-08-02 14:27:28 -0700 | [diff] [blame] | 140 | static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) = |
| 141 | (void *) BPF_FUNC_get_local_storage; |
Andrey Ignatov | 02f6ac7 | 2018-08-12 10:49:29 -0700 | [diff] [blame] | 142 | static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = |
| 143 | (void *) BPF_FUNC_skb_cgroup_id; |
| 144 | static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = |
| 145 | (void *) BPF_FUNC_skb_ancestor_cgroup_id; |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame^] | 146 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, |
| 147 | struct bpf_sock_tuple *tuple, |
| 148 | int size, unsigned int netns_id, |
| 149 | unsigned long long flags) = |
| 150 | (void *) BPF_FUNC_sk_lookup_tcp; |
| 151 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, |
| 152 | struct bpf_sock_tuple *tuple, |
| 153 | int size, unsigned int netns_id, |
| 154 | unsigned long long flags) = |
| 155 | (void *) BPF_FUNC_sk_lookup_udp; |
| 156 | static int (*bpf_sk_release)(struct bpf_sock *sk) = |
| 157 | (void *) BPF_FUNC_sk_release; |
Alexei Starovoitov | 249b812 | 2014-12-01 15:06:37 -0800 | [diff] [blame] | 158 | |
| 159 | /* llvm builtin functions that eBPF C program may use to |
| 160 | * emit BPF_LD_ABS and BPF_LD_IND instructions |
| 161 | */ |
| 162 | struct sk_buff; |
| 163 | unsigned long long load_byte(void *skb, |
| 164 | unsigned long long off) asm("llvm.bpf.load.byte"); |
| 165 | unsigned long long load_half(void *skb, |
| 166 | unsigned long long off) asm("llvm.bpf.load.half"); |
| 167 | unsigned long long load_word(void *skb, |
| 168 | unsigned long long off) asm("llvm.bpf.load.word"); |
| 169 | |
/* a helper structure used by eBPF C program
 * to describe map attributes to elf_bpf loader.
 * Field order and sizes are part of the ELF "maps" section ABI the
 * loader parses; do not reorder or change types.
 */
struct bpf_map_def {
	unsigned int type;		/* BPF_MAP_TYPE_* */
	unsigned int key_size;		/* key size in bytes */
	unsigned int value_size;	/* value size in bytes */
	unsigned int max_entries;
	unsigned int map_flags;		/* BPF_F_* map creation flags */
	unsigned int inner_map_idx;	/* for map-in-map types */
	unsigned int numa_node;
};
| 182 | |
/* Emit BTF key/value type info for a map: defines a dummy struct pairing
 * the key and value types and places one (zero-initialized) instance in
 * the ".maps.<name>" section, where the loader can find it.
 */
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)		\
	struct ____btf_map_##name {				\
		type_key key;					\
		type_val value;					\
	};							\
	struct ____btf_map_##name				\
	__attribute__ ((section(".maps." #name), used))		\
		____btf_map_##name = { }
| 191 | |
Thomas Graf | f74599f | 2016-11-30 17:10:11 +0100 | [diff] [blame] | 192 | static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = |
| 193 | (void *) BPF_FUNC_skb_load_bytes; |
Martin KaFai Lau | 91134d8 | 2018-08-08 01:01:31 -0700 | [diff] [blame] | 194 | static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) = |
| 195 | (void *) BPF_FUNC_skb_load_bytes_relative; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 196 | static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = |
| 197 | (void *) BPF_FUNC_skb_store_bytes; |
| 198 | static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) = |
| 199 | (void *) BPF_FUNC_l3_csum_replace; |
| 200 | static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = |
| 201 | (void *) BPF_FUNC_l4_csum_replace; |
Nikita V. Shirokov | c6ffd1f | 2018-04-17 21:42:23 -0700 | [diff] [blame] | 202 | static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) = |
| 203 | (void *) BPF_FUNC_csum_diff; |
Daniel Borkmann | 747ea55 | 2016-08-12 22:17:17 +0200 | [diff] [blame] | 204 | static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = |
| 205 | (void *) BPF_FUNC_skb_under_cgroup; |
Thomas Graf | f74599f | 2016-11-30 17:10:11 +0100 | [diff] [blame] | 206 | static int (*bpf_skb_change_head)(void *, int len, int flags) = |
| 207 | (void *) BPF_FUNC_skb_change_head; |
John Fastabend | 82a8616 | 2018-03-18 12:57:31 -0700 | [diff] [blame] | 208 | static int (*bpf_skb_pull_data)(void *, int len) = |
| 209 | (void *) BPF_FUNC_skb_pull_data; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 210 | |
/* Scan the ARCH passed in from ARCH env variable (see Makefile).
 *
 * BUGFIX: the target macro was previously defined as bpf_target_s930x
 * (a typo) while the PT_REGS section below tests bpf_target_s390x, so
 * the s390x register accessors could never be selected.  We now define
 * bpf_target_s390x consistently; the misspelled __TARGET_ARCH_s930x is
 * still accepted for backward compatibility with existing build setups.
 */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390x) || defined(__TARGET_ARCH_s930x)
#define bpf_target_s390x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s390x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__sparc__)
#define bpf_target_sparc
#endif
#endif

/* Per-arch accessors for kprobe arguments: map function-call parameters,
 * return value, stack/frame pointers and PC onto struct pt_regs fields.
 */
#if defined(bpf_target_x86)

#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)

#elif defined(bpf_target_s390x)

#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)

#elif defined(bpf_target_arm64)

#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_PARM3(x) ((x)->regs[2])
#define PT_REGS_PARM4(x) ((x)->regs[3])
#define PT_REGS_PARM5(x) ((x)->regs[4])
#define PT_REGS_RET(x) ((x)->regs[30])
#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[0])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)

#elif defined(bpf_target_mips)

#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)

#elif defined(bpf_target_powerpc)

#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)

#elif defined(bpf_target_sparc)

#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])

/* Should this also be a bpf_target check for the sparc case? */
/* NOTE(review): __arch64__ is the compiler-defined sparc64 macro; when
 * cross-compiling via __TARGET_ARCH_sparc it may not be set — confirm.
 */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif

#endif
Naveen N. Rao | 138d615 | 2016-04-04 22:31:34 +0530 | [diff] [blame] | 333 | |
Joel Fernandes | b655fc1 | 2017-09-20 09:11:58 -0700 | [diff] [blame] | 334 | #ifdef bpf_target_powerpc |
Naveen N. Rao | 138d615 | 2016-04-04 22:31:34 +0530 | [diff] [blame] | 335 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) |
| 336 | #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP |
Joel Fernandes | b655fc1 | 2017-09-20 09:11:58 -0700 | [diff] [blame] | 337 | #elif bpf_target_sparc |
David S. Miller | b0c4780 | 2017-04-22 12:31:05 -0700 | [diff] [blame] | 338 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) |
| 339 | #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP |
Naveen N. Rao | 138d615 | 2016-04-04 22:31:34 +0530 | [diff] [blame] | 340 | #else |
| 341 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \ |
| 342 | bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) |
| 343 | #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \ |
| 344 | bpf_probe_read(&(ip), sizeof(ip), \ |
| 345 | (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) |
| 346 | #endif |
| 347 | |
Alexei Starovoitov | 249b812 | 2014-12-01 15:06:37 -0800 | [diff] [blame] | 348 | #endif |