blob: 1d407b3494f949520c8c4cb8d0303c1d70dfba27 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BPF_HELPERS_H
#define __BPF_HELPERS_H

/* helper macro to place programs, maps, license in
 * different sections in elf_bpf file. Section names
 * are interpreted by elf_bpf loader.
 * "used" keeps the symbol alive even when nothing in this
 * translation unit references it, so the loader can find it.
 */
#define SEC(NAME) __attribute__((section(NAME), used))
/* helper functions called from eBPF programs written in C.
 *
 * Each entry is a function pointer whose value is the BPF_FUNC_* helper
 * number; a call through it is turned into a BPF CALL instruction with
 * that helper id (NOTE(review): resolved by the in-kernel verifier at
 * program load time — these are not real function pointers at runtime).
 * Prototypes intentionally use void * for context/map/key arguments.
 */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
	(void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value,
				  unsigned long long flags) =
	(void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
	(void *) BPF_FUNC_map_delete_elem;
static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
	(void *) BPF_FUNC_probe_read;
static unsigned long long (*bpf_ktime_get_ns)(void) =
	(void *) BPF_FUNC_ktime_get_ns;
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
	(void *) BPF_FUNC_trace_printk;
static void (*bpf_tail_call)(void *ctx, void *map, int index) =
	(void *) BPF_FUNC_tail_call;
static unsigned long long (*bpf_get_smp_processor_id)(void) =
	(void *) BPF_FUNC_get_smp_processor_id;
static unsigned long long (*bpf_get_current_pid_tgid)(void) =
	(void *) BPF_FUNC_get_current_pid_tgid;
static unsigned long long (*bpf_get_current_uid_gid)(void) =
	(void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
	(void *) BPF_FUNC_get_current_comm;
static unsigned long long (*bpf_perf_event_read)(void *map,
						 unsigned long long flags) =
	(void *) BPF_FUNC_perf_event_read;
static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
	(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
	(void *) BPF_FUNC_redirect;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
	(void *) BPF_FUNC_redirect_map;
static int (*bpf_perf_event_output)(void *ctx, void *map,
				    unsigned long long flags, void *data,
				    int size) =
	(void *) BPF_FUNC_perf_event_output;
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
	(void *) BPF_FUNC_get_stackid;
static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
	(void *) BPF_FUNC_probe_write_user;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
	(void *) BPF_FUNC_current_task_under_cgroup;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
	(void *) BPF_FUNC_skb_set_tunnel_key;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
	(void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
	(void *) BPF_FUNC_skb_set_tunnel_opt;
static unsigned long long (*bpf_get_prandom_u32)(void) =
	(void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
	(void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
	(void *) BPF_FUNC_xdp_adjust_meta;
static int (*bpf_get_socket_cookie)(void *ctx) =
	(void *) BPF_FUNC_get_socket_cookie;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
			     int optlen) =
	(void *) BPF_FUNC_setsockopt;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
			     int optlen) =
	(void *) BPF_FUNC_getsockopt;
static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
	(void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
	(void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) =
	(void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
				  unsigned long long flags) =
	(void *) BPF_FUNC_sock_map_update;
static int (*bpf_sock_hash_update)(void *map, void *key, void *value,
				   unsigned long long flags) =
	(void *) BPF_FUNC_sock_hash_update;
static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
					void *buf, unsigned int buf_size) =
	(void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
				       unsigned int buf_size) =
	(void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_override_return)(void *ctx, unsigned long rc) =
	(void *) BPF_FUNC_override_return;
static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
	(void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_redirect_hash)(void *ctx,
				    void *map, void *key, int flags) =
	(void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
	(void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
	(void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
	(void *) BPF_FUNC_msg_pull_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
	(void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
	(void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
				     int size, int flags) =
	(void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
	(void *) BPF_FUNC_sk_select_reuseport;
static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
	(void *) BPF_FUNC_get_stack;
static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
			     int plen, __u32 flags) =
	(void *) BPF_FUNC_fib_lookup;
static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr,
				 unsigned int len) =
	(void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset,
				       void *from, unsigned int len) =
	(void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param,
				  unsigned int param_len) =
	(void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset,
				      unsigned int len) =
	(void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_rc_repeat)(void *ctx) =
	(void *) BPF_FUNC_rc_repeat;
static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
			     unsigned long long scancode, unsigned int toggle) =
	(void *) BPF_FUNC_rc_keydown;
static unsigned long long (*bpf_get_current_cgroup_id)(void) =
	(void *) BPF_FUNC_get_current_cgroup_id;
static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
	(void *) BPF_FUNC_get_local_storage;
static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
	(void *) BPF_FUNC_skb_cgroup_id;
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
	(void *) BPF_FUNC_skb_ancestor_cgroup_id;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
					     struct bpf_sock_tuple *tuple,
					     int size, unsigned int netns_id,
					     unsigned long long flags) =
	(void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
					     struct bpf_sock_tuple *tuple,
					     int size, unsigned int netns_id,
					     unsigned long long flags) =
	(void *) BPF_FUNC_sk_lookup_udp;
/* a socket obtained from bpf_sk_lookup_{tcp,udp} must be released
 * with bpf_sk_release (reference-counted — verify against bpf-helpers(7))
 */
static int (*bpf_sk_release)(struct bpf_sock *sk) =
	(void *) BPF_FUNC_sk_release;
Alexei Starovoitov249b8122014-12-01 15:06:37 -0800158
/* llvm builtin functions that eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions.
 * The asm() names bind directly to llvm intrinsics; there is no C
 * definition — llvm lowers the calls itself.
 */
struct sk_buff;
unsigned long long load_byte(void *skb,
			     unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
			     unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
			     unsigned long long off) asm("llvm.bpf.load.word");
169
/* a helper structure used by eBPF C program
 * to describe map attributes to elf_bpf loader
 */
struct bpf_map_def {
	unsigned int type;		/* BPF_MAP_TYPE_* */
	unsigned int key_size;		/* size of a key, in bytes */
	unsigned int value_size;	/* size of a value, in bytes */
	unsigned int max_entries;	/* map capacity */
	unsigned int map_flags;		/* BPF_F_* creation flags */
	unsigned int inner_map_idx;	/* presumably index of the inner map
					 * for map-in-map types — verify
					 * against the elf_bpf loader */
	unsigned int numa_node;		/* NUMA node to allocate map on */
};

/* Emit a dummy variable in section ".maps.<name>" whose struct type pairs
 * the map's key and value types, so BTF type information can be associated
 * with map <name> by the loader. The instance itself is never used.
 */
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)		\
	struct ____btf_map_##name {				\
		type_key key;					\
		type_val value;					\
	};							\
	struct ____btf_map_##name				\
	__attribute__ ((section(".maps." #name), used))		\
		____btf_map_##name = { }
191
/* skb / checksum helpers (same function-pointer-to-helper-id pattern as
 * the declarations above)
 */
static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
	(void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
	(void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
	(void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
	(void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
	(void *) BPF_FUNC_l4_csum_replace;
static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
	(void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
	(void *) BPF_FUNC_skb_under_cgroup;
static int (*bpf_skb_change_head)(void *, int len, int flags) =
	(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
	(void *) BPF_FUNC_skb_pull_data;
Alexei Starovoitov91bc48222015-04-01 17:12:13 -0700210
/* Scan the ARCH passed in from ARCH env variable (see Makefile).
 *
 * BUGFIX: this block used to define bpf_target_s930x (typo), while the
 * PT_REGS section below tests bpf_target_s390x — so the s390x register
 * accessors were never enabled.  Define the correctly spelled macro and
 * keep accepting the old __TARGET_ARCH_s930x spelling for backward
 * compatibility with Makefiles that still pass it.
 */
#if defined(__TARGET_ARCH_x86)
	#define bpf_target_x86
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390x) || defined(__TARGET_ARCH_s930x)
	#define bpf_target_s390x
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
	#define bpf_target_arm64
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
	#define bpf_target_mips
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
	#define bpf_target_powerpc
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
	#define bpf_target_sparc
	#define bpf_target_defined
#else
	#undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
	#define bpf_target_x86
#elif defined(__s390x__)
	#define bpf_target_s390x
#elif defined(__aarch64__)
	#define bpf_target_arm64
#elif defined(__mips__)
	#define bpf_target_mips
#elif defined(__powerpc__)
	#define bpf_target_powerpc
#elif defined(__sparc__)
	#define bpf_target_sparc
#endif
#endif
250
/* Per-architecture pt_regs accessors: calling-convention argument
 * registers (PARM1..PARM5), return-address (RET), frame pointer (FP),
 * return-value register (RC), stack pointer (SP) and program counter (IP).
 */
#if defined(bpf_target_x86)

#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)

#elif defined(bpf_target_s390x)

#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)

#elif defined(bpf_target_arm64)

#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_PARM3(x) ((x)->regs[2])
#define PT_REGS_PARM4(x) ((x)->regs[3])
#define PT_REGS_PARM5(x) ((x)->regs[4])
#define PT_REGS_RET(x) ((x)->regs[30])
#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[0])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)

#elif defined(bpf_target_mips)

#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)

#elif defined(bpf_target_powerpc)

/* NOTE: powerpc deliberately has no PT_REGS_RET/PT_REGS_FP here;
 * BPF_KPROBE_READ_RET_IP below special-cases it via ctx->link.
 */
#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)

#elif defined(bpf_target_sparc)

#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])

/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif

#endif
Naveen N. Rao138d6152016-04-04 22:31:34 +0530333
/* Read the probed function's return address into ip.
 * powerpc/sparc expose it directly in pt_regs; elsewhere it must be
 * fetched from the stack with bpf_probe_read (and for kretprobes, from
 * one word above the frame pointer, since RET is clobbered by then).
 *
 * BUGFIX: "#elif bpf_target_sparc" expanded the macro itself; since
 * bpf_target_sparc is defined *empty* above, that left #elif with no
 * expression — a preprocessing error on sparc builds.  Use
 * defined(bpf_target_sparc) like every other arch test in this file.
 */
#ifdef bpf_target_powerpc
#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({				\
		bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx)	({				\
		bpf_probe_read(&(ip), sizeof(ip),				\
			       (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
347
Alexei Starovoitov249b8122014-12-01 15:06:37 -0800348#endif