Greg Kroah-Hartman | e2be04c | 2017-11-01 15:09:13 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of version 2 of the GNU General Public |
| 6 | * License as published by the Free Software Foundation. |
| 7 | */ |
| 8 | #ifndef _UAPI__LINUX_BPF_H__ |
| 9 | #define _UAPI__LINUX_BPF_H__ |
| 10 | |
| 11 | #include <linux/types.h> |
Alexei Starovoitov | c15952d | 2014-10-14 02:08:54 -0700 | [diff] [blame] | 12 | #include <linux/bpf_common.h> |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 13 | |
| 14 | /* Extended instruction set based on top of classic BPF */ |
| 15 | |
| 16 | /* instruction classes */ |
Jiong Wang | d405c74 | 2019-01-26 12:25:59 -0500 | [diff] [blame] | 17 | #define BPF_JMP32 0x06 /* jmp mode in word width */ |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 18 | #define BPF_ALU64 0x07 /* alu mode in double word width */ |
| 19 | |
| 20 | /* ld/ldx fields */ |
Jesper Dangaard Brouer | cb5f733 | 2018-01-17 12:05:36 +0100 | [diff] [blame] | 21 | #define BPF_DW 0x18 /* double word (64-bit) */ |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 22 | #define BPF_XADD 0xc0 /* exclusive add */ |
| 23 | |
| 24 | /* alu/jmp fields */ |
| 25 | #define BPF_MOV 0xb0 /* mov reg to reg */ |
| 26 | #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ |
| 27 | |
| 28 | /* change endianness of a register */ |
| 29 | #define BPF_END 0xd0 /* flags for endianness conversion: */ |
| 30 | #define BPF_TO_LE 0x00 /* convert to little-endian */ |
| 31 | #define BPF_TO_BE 0x08 /* convert to big-endian */ |
| 32 | #define BPF_FROM_LE BPF_TO_LE |
| 33 | #define BPF_FROM_BE BPF_TO_BE |
| 34 | |
Daniel Borkmann | 92b31a9 | 2017-08-10 01:39:55 +0200 | [diff] [blame] | 35 | /* jmp encodings */ |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 36 | #define BPF_JNE 0x50 /* jump != */ |
Daniel Borkmann | 92b31a9 | 2017-08-10 01:39:55 +0200 | [diff] [blame] | 37 | #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ |
| 38 | #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 39 | #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ |
| 40 | #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ |
Daniel Borkmann | 92b31a9 | 2017-08-10 01:39:55 +0200 | [diff] [blame] | 41 | #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ |
| 42 | #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 43 | #define BPF_CALL 0x80 /* function call */ |
| 44 | #define BPF_EXIT 0x90 /* function return */ |
| 45 | |
| 46 | /* Register numbers */ |
| 47 | enum { |
| 48 | BPF_REG_0 = 0, |
| 49 | BPF_REG_1, |
| 50 | BPF_REG_2, |
| 51 | BPF_REG_3, |
| 52 | BPF_REG_4, |
| 53 | BPF_REG_5, |
| 54 | BPF_REG_6, |
| 55 | BPF_REG_7, |
| 56 | BPF_REG_8, |
| 57 | BPF_REG_9, |
| 58 | BPF_REG_10, |
| 59 | __MAX_BPF_REG, |
| 60 | }; |
| 61 | |
| 62 | /* BPF has 10 general purpose 64-bit registers and stack frame. */ |
| 63 | #define MAX_BPF_REG __MAX_BPF_REG |
| 64 | |
| 65 | struct bpf_insn { |
| 66 | __u8 code; /* opcode */ |
| 67 | __u8 dst_reg:4; /* dest register */ |
| 68 | __u8 src_reg:4; /* source register */ |
| 69 | __s16 off; /* signed offset */ |
| 70 | __s32 imm; /* signed immediate constant */ |
| 71 | }; |
| 72 | |
Daniel Mack | b95a5c4 | 2017-01-21 17:26:11 +0100 | [diff] [blame] | 73 | /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ |
| 74 | struct bpf_lpm_trie_key { |
| 75 | __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ |
Gustavo A. R. Silva | 1e6e9d0 | 2020-04-24 10:50:00 -0500 | [diff] [blame] | 76 | __u8 data[0]; /* Arbitrary size */ |
Daniel Mack | b95a5c4 | 2017-01-21 17:26:11 +0100 | [diff] [blame] | 77 | }; |
| 78 | |
Roman Gushchin | de9cbba | 2018-08-02 14:27:18 -0700 | [diff] [blame] | 79 | struct bpf_cgroup_storage_key { |
| 80 | __u64 cgroup_inode_id; /* cgroup inode id */ |
| 81 | __u32 attach_type; /* program attach type */ |
| 82 | }; |
| 83 | |
Yonghong Song | 5e7b302 | 2020-08-04 22:50:56 -0700 | [diff] [blame] | 84 | union bpf_iter_link_info { |
| 85 | struct { |
| 86 | __u32 map_fd; |
| 87 | } map; |
| 88 | }; |
| 89 | |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 90 | /* BPF syscall commands, see bpf(2) man-page for details. */ |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 91 | enum bpf_cmd { |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 92 | BPF_MAP_CREATE, |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 93 | BPF_MAP_LOOKUP_ELEM, |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 94 | BPF_MAP_UPDATE_ELEM, |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 95 | BPF_MAP_DELETE_ELEM, |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 96 | BPF_MAP_GET_NEXT_KEY, |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 97 | BPF_PROG_LOAD, |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 98 | BPF_OBJ_PIN, |
| 99 | BPF_OBJ_GET, |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 100 | BPF_PROG_ATTACH, |
| 101 | BPF_PROG_DETACH, |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 102 | BPF_PROG_TEST_RUN, |
Martin KaFai Lau | 34ad558 | 2017-06-05 12:15:48 -0700 | [diff] [blame] | 103 | BPF_PROG_GET_NEXT_ID, |
| 104 | BPF_MAP_GET_NEXT_ID, |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 105 | BPF_PROG_GET_FD_BY_ID, |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 106 | BPF_MAP_GET_FD_BY_ID, |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 107 | BPF_OBJ_GET_INFO_BY_FD, |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 108 | BPF_PROG_QUERY, |
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 109 | BPF_RAW_TRACEPOINT_OPEN, |
Martin KaFai Lau | f56a653 | 2018-04-18 15:56:01 -0700 | [diff] [blame] | 110 | BPF_BTF_LOAD, |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 111 | BPF_BTF_GET_FD_BY_ID, |
Yonghong Song | 41bdc4b | 2018-05-24 11:21:09 -0700 | [diff] [blame] | 112 | BPF_TASK_FD_QUERY, |
Mauricio Vasquez B | bd513cd | 2018-10-18 15:16:30 +0200 | [diff] [blame] | 113 | BPF_MAP_LOOKUP_AND_DELETE_ELEM, |
Daniel Borkmann | 87df15d | 2019-04-09 23:20:06 +0200 | [diff] [blame] | 114 | BPF_MAP_FREEZE, |
Quentin Monnet | 1b9ed84 | 2019-08-20 10:31:50 +0100 | [diff] [blame] | 115 | BPF_BTF_GET_NEXT_ID, |
Brian Vazquez | cb4d03a | 2020-01-15 10:43:01 -0800 | [diff] [blame] | 116 | BPF_MAP_LOOKUP_BATCH, |
Yonghong Song | 0579963 | 2020-01-15 10:43:04 -0800 | [diff] [blame] | 117 | BPF_MAP_LOOKUP_AND_DELETE_BATCH, |
Brian Vazquez | aa2e93b | 2020-01-15 10:43:02 -0800 | [diff] [blame] | 118 | BPF_MAP_UPDATE_BATCH, |
| 119 | BPF_MAP_DELETE_BATCH, |
Andrii Nakryiko | af6eea5 | 2020-03-29 19:59:58 -0700 | [diff] [blame] | 120 | BPF_LINK_CREATE, |
Andrii Nakryiko | 0c991eb | 2020-03-29 19:59:59 -0700 | [diff] [blame] | 121 | BPF_LINK_UPDATE, |
Andrii Nakryiko | 2d602c8 | 2020-04-28 17:16:07 -0700 | [diff] [blame] | 122 | BPF_LINK_GET_FD_BY_ID, |
| 123 | BPF_LINK_GET_NEXT_ID, |
Song Liu | d46edd6 | 2020-04-30 00:15:04 -0700 | [diff] [blame] | 124 | BPF_ENABLE_STATS, |
Yonghong Song | ac51d99 | 2020-05-09 10:59:05 -0700 | [diff] [blame] | 125 | BPF_ITER_CREATE, |
Andrii Nakryiko | 73b11c2a | 2020-07-31 11:28:26 -0700 | [diff] [blame] | 126 | BPF_LINK_DETACH, |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 127 | }; |
| 128 | |
| 129 | enum bpf_map_type { |
| 130 | BPF_MAP_TYPE_UNSPEC, |
Alexei Starovoitov | 0f8e4bd | 2014-11-13 17:36:45 -0800 | [diff] [blame] | 131 | BPF_MAP_TYPE_HASH, |
Alexei Starovoitov | 28fbcfa | 2014-11-13 17:36:46 -0800 | [diff] [blame] | 132 | BPF_MAP_TYPE_ARRAY, |
Alexei Starovoitov | 04fd61ab | 2015-05-19 16:59:03 -0700 | [diff] [blame] | 133 | BPF_MAP_TYPE_PROG_ARRAY, |
Kaixu Xia | ea317b2 | 2015-08-06 07:02:34 +0000 | [diff] [blame] | 134 | BPF_MAP_TYPE_PERF_EVENT_ARRAY, |
Alexei Starovoitov | 824bd0c | 2016-02-01 22:39:53 -0800 | [diff] [blame] | 135 | BPF_MAP_TYPE_PERCPU_HASH, |
Alexei Starovoitov | a10423b | 2016-02-01 22:39:54 -0800 | [diff] [blame] | 136 | BPF_MAP_TYPE_PERCPU_ARRAY, |
Alexei Starovoitov | d5a3b1f | 2016-02-17 19:58:58 -0800 | [diff] [blame] | 137 | BPF_MAP_TYPE_STACK_TRACE, |
Martin KaFai Lau | 4ed8ec5 | 2016-06-30 10:28:43 -0700 | [diff] [blame] | 138 | BPF_MAP_TYPE_CGROUP_ARRAY, |
Martin KaFai Lau | 29ba732 | 2016-11-11 10:55:09 -0800 | [diff] [blame] | 139 | BPF_MAP_TYPE_LRU_HASH, |
Martin KaFai Lau | 8f84493 | 2016-11-11 10:55:10 -0800 | [diff] [blame] | 140 | BPF_MAP_TYPE_LRU_PERCPU_HASH, |
Daniel Mack | b95a5c4 | 2017-01-21 17:26:11 +0100 | [diff] [blame] | 141 | BPF_MAP_TYPE_LPM_TRIE, |
Martin KaFai Lau | 56f668d | 2017-03-22 10:00:33 -0700 | [diff] [blame] | 142 | BPF_MAP_TYPE_ARRAY_OF_MAPS, |
Martin KaFai Lau | bcc6b1b | 2017-03-22 10:00:34 -0700 | [diff] [blame] | 143 | BPF_MAP_TYPE_HASH_OF_MAPS, |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 144 | BPF_MAP_TYPE_DEVMAP, |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 145 | BPF_MAP_TYPE_SOCKMAP, |
Jesper Dangaard Brouer | 6710e11 | 2017-10-16 12:19:28 +0200 | [diff] [blame] | 146 | BPF_MAP_TYPE_CPUMAP, |
Björn Töpel | fbfc504a | 2018-05-02 13:01:28 +0200 | [diff] [blame] | 147 | BPF_MAP_TYPE_XSKMAP, |
John Fastabend | 8111038 | 2018-05-14 10:00:17 -0700 | [diff] [blame] | 148 | BPF_MAP_TYPE_SOCKHASH, |
Roman Gushchin | de9cbba | 2018-08-02 14:27:18 -0700 | [diff] [blame] | 149 | BPF_MAP_TYPE_CGROUP_STORAGE, |
Martin KaFai Lau | 5dc4c4b | 2018-08-08 01:01:24 -0700 | [diff] [blame] | 150 | BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, |
Roman Gushchin | b741f16 | 2018-09-28 14:45:43 +0000 | [diff] [blame] | 151 | BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, |
Mauricio Vasquez B | f1a2e44 | 2018-10-18 15:16:25 +0200 | [diff] [blame] | 152 | BPF_MAP_TYPE_QUEUE, |
| 153 | BPF_MAP_TYPE_STACK, |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 154 | BPF_MAP_TYPE_SK_STORAGE, |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 155 | BPF_MAP_TYPE_DEVMAP_HASH, |
Martin KaFai Lau | 85d33df | 2020-01-08 16:35:05 -0800 | [diff] [blame] | 156 | BPF_MAP_TYPE_STRUCT_OPS, |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 157 | BPF_MAP_TYPE_RINGBUF, |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 158 | }; |
| 159 | |
Daniel Borkmann | 6c4fc20 | 2018-12-16 00:49:47 +0100 | [diff] [blame] | 160 | /* Note that tracing related programs such as |
| 161 | * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} |
| 162 | * are not subject to a stable API since kernel internal data |
| 163 | * structures can change from release to release and may |
| 164 | * therefore break existing tracing BPF programs. Tracing BPF |
| 165 | * programs correspond to /a/ specific kernel which is to be |
| 166 | * analyzed, and not /a/ specific kernel /and/ all future ones. |
| 167 | */ |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 168 | enum bpf_prog_type { |
| 169 | BPF_PROG_TYPE_UNSPEC, |
Alexei Starovoitov | ddd872b | 2014-12-01 15:06:34 -0800 | [diff] [blame] | 170 | BPF_PROG_TYPE_SOCKET_FILTER, |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 171 | BPF_PROG_TYPE_KPROBE, |
Daniel Borkmann | 96be432 | 2015-03-01 12:31:46 +0100 | [diff] [blame] | 172 | BPF_PROG_TYPE_SCHED_CLS, |
Daniel Borkmann | 94caee8c | 2015-03-20 15:11:11 +0100 | [diff] [blame] | 173 | BPF_PROG_TYPE_SCHED_ACT, |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 174 | BPF_PROG_TYPE_TRACEPOINT, |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 175 | BPF_PROG_TYPE_XDP, |
Alexei Starovoitov | 0515e59 | 2016-09-01 18:37:22 -0700 | [diff] [blame] | 176 | BPF_PROG_TYPE_PERF_EVENT, |
Daniel Mack | 0e33661 | 2016-11-23 16:52:25 +0100 | [diff] [blame] | 177 | BPF_PROG_TYPE_CGROUP_SKB, |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 178 | BPF_PROG_TYPE_CGROUP_SOCK, |
Thomas Graf | 3a0af8f | 2016-11-30 17:10:10 +0100 | [diff] [blame] | 179 | BPF_PROG_TYPE_LWT_IN, |
| 180 | BPF_PROG_TYPE_LWT_OUT, |
| 181 | BPF_PROG_TYPE_LWT_XMIT, |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 182 | BPF_PROG_TYPE_SOCK_OPS, |
John Fastabend | b005fd1 | 2017-08-15 22:31:58 -0700 | [diff] [blame] | 183 | BPF_PROG_TYPE_SK_SKB, |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 184 | BPF_PROG_TYPE_CGROUP_DEVICE, |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 185 | BPF_PROG_TYPE_SK_MSG, |
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 186 | BPF_PROG_TYPE_RAW_TRACEPOINT, |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 187 | BPF_PROG_TYPE_CGROUP_SOCK_ADDR, |
Mathieu Xhonneux | 004d4b2 | 2018-05-20 14:58:16 +0100 | [diff] [blame] | 188 | BPF_PROG_TYPE_LWT_SEG6LOCAL, |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 189 | BPF_PROG_TYPE_LIRC_MODE2, |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 190 | BPF_PROG_TYPE_SK_REUSEPORT, |
Petar Penkov | d58e468 | 2018-09-14 07:46:18 -0700 | [diff] [blame] | 191 | BPF_PROG_TYPE_FLOW_DISSECTOR, |
Andrey Ignatov | 7b146ce | 2019-02-27 12:59:24 -0800 | [diff] [blame] | 192 | BPF_PROG_TYPE_CGROUP_SYSCTL, |
Matt Mullins | 9df1c28 | 2019-04-26 11:49:47 -0700 | [diff] [blame] | 193 | BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, |
Stanislav Fomichev | 0d01da6 | 2019-06-27 13:38:47 -0700 | [diff] [blame] | 194 | BPF_PROG_TYPE_CGROUP_SOCKOPT, |
Alexei Starovoitov | f1b9509 | 2019-10-30 15:32:11 -0700 | [diff] [blame] | 195 | BPF_PROG_TYPE_TRACING, |
Martin KaFai Lau | 27ae7997 | 2020-01-08 16:35:03 -0800 | [diff] [blame] | 196 | BPF_PROG_TYPE_STRUCT_OPS, |
Alexei Starovoitov | be8704f | 2020-01-20 16:53:46 -0800 | [diff] [blame] | 197 | BPF_PROG_TYPE_EXT, |
KP Singh | fc611f4 | 2020-03-29 01:43:49 +0100 | [diff] [blame] | 198 | BPF_PROG_TYPE_LSM, |
Jakub Sitnicki | e9ddbb7 | 2020-07-17 12:35:23 +0200 | [diff] [blame] | 199 | BPF_PROG_TYPE_SK_LOOKUP, |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 200 | }; |
| 201 | |
Daniel Mack | 0e33661 | 2016-11-23 16:52:25 +0100 | [diff] [blame] | 202 | enum bpf_attach_type { |
| 203 | BPF_CGROUP_INET_INGRESS, |
| 204 | BPF_CGROUP_INET_EGRESS, |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 205 | BPF_CGROUP_INET_SOCK_CREATE, |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 206 | BPF_CGROUP_SOCK_OPS, |
John Fastabend | 464bc0f | 2017-08-28 07:10:04 -0700 | [diff] [blame] | 207 | BPF_SK_SKB_STREAM_PARSER, |
| 208 | BPF_SK_SKB_STREAM_VERDICT, |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 209 | BPF_CGROUP_DEVICE, |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 210 | BPF_SK_MSG_VERDICT, |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 211 | BPF_CGROUP_INET4_BIND, |
| 212 | BPF_CGROUP_INET6_BIND, |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 213 | BPF_CGROUP_INET4_CONNECT, |
| 214 | BPF_CGROUP_INET6_CONNECT, |
Andrey Ignatov | aac3fc3 | 2018-03-30 15:08:07 -0700 | [diff] [blame] | 215 | BPF_CGROUP_INET4_POST_BIND, |
| 216 | BPF_CGROUP_INET6_POST_BIND, |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 217 | BPF_CGROUP_UDP4_SENDMSG, |
| 218 | BPF_CGROUP_UDP6_SENDMSG, |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 219 | BPF_LIRC_MODE2, |
Petar Penkov | d58e468 | 2018-09-14 07:46:18 -0700 | [diff] [blame] | 220 | BPF_FLOW_DISSECTOR, |
Andrey Ignatov | 7b146ce | 2019-02-27 12:59:24 -0800 | [diff] [blame] | 221 | BPF_CGROUP_SYSCTL, |
Daniel Borkmann | 983695f | 2019-06-07 01:48:57 +0200 | [diff] [blame] | 222 | BPF_CGROUP_UDP4_RECVMSG, |
| 223 | BPF_CGROUP_UDP6_RECVMSG, |
Stanislav Fomichev | 0d01da6 | 2019-06-27 13:38:47 -0700 | [diff] [blame] | 224 | BPF_CGROUP_GETSOCKOPT, |
| 225 | BPF_CGROUP_SETSOCKOPT, |
Alexei Starovoitov | f1b9509 | 2019-10-30 15:32:11 -0700 | [diff] [blame] | 226 | BPF_TRACE_RAW_TP, |
Alexei Starovoitov | fec56f5 | 2019-11-14 10:57:04 -0800 | [diff] [blame] | 227 | BPF_TRACE_FENTRY, |
| 228 | BPF_TRACE_FEXIT, |
KP Singh | ae24082 | 2020-03-04 20:18:49 +0100 | [diff] [blame] | 229 | BPF_MODIFY_RETURN, |
KP Singh | fc611f4 | 2020-03-29 01:43:49 +0100 | [diff] [blame] | 230 | BPF_LSM_MAC, |
Yonghong Song | 15d83c4 | 2020-05-09 10:59:00 -0700 | [diff] [blame] | 231 | BPF_TRACE_ITER, |
Daniel Borkmann | 1b66d25 | 2020-05-19 00:45:45 +0200 | [diff] [blame] | 232 | BPF_CGROUP_INET4_GETPEERNAME, |
| 233 | BPF_CGROUP_INET6_GETPEERNAME, |
| 234 | BPF_CGROUP_INET4_GETSOCKNAME, |
| 235 | BPF_CGROUP_INET6_GETSOCKNAME, |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 236 | BPF_XDP_DEVMAP, |
Stanislav Fomichev | f583674 | 2020-07-06 16:01:25 -0700 | [diff] [blame] | 237 | BPF_CGROUP_INET_SOCK_RELEASE, |
Lorenzo Bianconi | 9216477 | 2020-07-14 15:56:38 +0200 | [diff] [blame] | 238 | BPF_XDP_CPUMAP, |
Jakub Sitnicki | e9ddbb7 | 2020-07-17 12:35:23 +0200 | [diff] [blame] | 239 | BPF_SK_LOOKUP, |
Andrii Nakryiko | aa8d3a7 | 2020-07-21 23:45:57 -0700 | [diff] [blame] | 240 | BPF_XDP, |
Daniel Mack | 0e33661 | 2016-11-23 16:52:25 +0100 | [diff] [blame] | 241 | __MAX_BPF_ATTACH_TYPE |
| 242 | }; |
| 243 | |
| 244 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE |
| 245 | |
Andrii Nakryiko | f2e10bf | 2020-04-28 17:16:08 -0700 | [diff] [blame] | 246 | enum bpf_link_type { |
| 247 | BPF_LINK_TYPE_UNSPEC = 0, |
| 248 | BPF_LINK_TYPE_RAW_TRACEPOINT = 1, |
| 249 | BPF_LINK_TYPE_TRACING = 2, |
| 250 | BPF_LINK_TYPE_CGROUP = 3, |
Yonghong Song | de4e05c | 2020-05-09 10:59:01 -0700 | [diff] [blame] | 251 | BPF_LINK_TYPE_ITER = 4, |
Jakub Sitnicki | 7f045a4 | 2020-05-31 10:28:38 +0200 | [diff] [blame] | 252 | BPF_LINK_TYPE_NETNS = 5, |
Andrii Nakryiko | aa8d3a7 | 2020-07-21 23:45:57 -0700 | [diff] [blame] | 253 | BPF_LINK_TYPE_XDP = 6, |
Andrii Nakryiko | f2e10bf | 2020-04-28 17:16:08 -0700 | [diff] [blame] | 254 | |
| 255 | MAX_BPF_LINK_TYPE, |
| 256 | }; |
| 257 | |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 258 | /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command |
| 259 | * |
| 260 | * NONE(default): No further bpf programs allowed in the subtree. |
| 261 | * |
| 262 | * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, |
| 263 | * the program in this cgroup yields to sub-cgroup program. |
| 264 | * |
| 265 | * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, |
| 266 | * that cgroup program gets run in addition to the program in this cgroup. |
| 267 | * |
| 268 | * Only one program is allowed to be attached to a cgroup with |
| 269 | * NONE or BPF_F_ALLOW_OVERRIDE flag. |
| 270 | * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will |
| 271 | * release old program and attach the new one. Attach flags has to match. |
| 272 | * |
| 273 | * Multiple programs are allowed to be attached to a cgroup with |
| 274 | * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order |
| 275 | * (those that were attached first, run first) |
| 276 | * The programs of sub-cgroup are executed first, then programs of |
| 277 | * this cgroup and then programs of parent cgroup. |
| 278 | * When children program makes decision (like picking TCP CA or sock bind) |
| 279 | * parent program has a chance to override it. |
| 280 | * |
Andrey Ignatov | 7dd68b3 | 2019-12-18 23:44:35 -0800 | [diff] [blame] | 281 | * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of |
| 282 | * programs for a cgroup. Though it's possible to replace an old program at |
| 283 | * any position by also specifying BPF_F_REPLACE flag and position itself in |
| 284 | * replace_bpf_fd attribute. Old program at this position will be released. |
| 285 | * |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 286 | * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. |
| 287 | * A cgroup with NONE doesn't allow any programs in sub-cgroups. |
| 288 | * Ex1: |
| 289 | * cgrp1 (MULTI progs A, B) -> |
| 290 | * cgrp2 (OVERRIDE prog C) -> |
| 291 | * cgrp3 (MULTI prog D) -> |
| 292 | * cgrp4 (OVERRIDE prog E) -> |
| 293 | * cgrp5 (NONE prog F) |
| 294 | * the event in cgrp5 triggers execution of F,D,A,B in that order. |
| 295 | * if prog F is detached, the execution is E,D,A,B |
| 296 | * if prog F and D are detached, the execution is E,A,B |
| 297 | * if prog F, E and D are detached, the execution is C,A,B |
| 298 | * |
| 299 | * All eligible programs are executed regardless of return code from |
| 300 | * earlier programs. |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 301 | */ |
| 302 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) |
Alexei Starovoitov | 324bda9e6 | 2017-10-02 22:50:21 -0700 | [diff] [blame] | 303 | #define BPF_F_ALLOW_MULTI (1U << 1) |
Andrey Ignatov | 7dd68b3 | 2019-12-18 23:44:35 -0800 | [diff] [blame] | 304 | #define BPF_F_REPLACE (1U << 2) |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 305 | |
David S. Miller | e07b98d | 2017-05-10 11:38:07 -0700 | [diff] [blame] | 306 | /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the |
| 307 | * verifier will perform strict alignment checking as if the kernel |
| 308 | * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, |
| 309 | * and NET_IP_ALIGN defined to 2. |
| 310 | */ |
| 311 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) |
| 312 | |
David Miller | e9ee9ef | 2018-11-30 21:08:14 -0800 | [diff] [blame] | 313 | /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the |
| 314 | * verifier will allow any alignment whatsoever. On platforms |
| 315 | * with strict alignment requirements for loads ands stores (such |
| 316 | * as sparc and mips) the verifier validates that all loads and |
| 317 | * stores provably follow this requirement. This flag turns that |
| 318 | * checking and enforcement off. |
| 319 | * |
| 320 | * It is mostly used for testing when we want to validate the |
| 321 | * context and memory access aspects of the verifier, but because |
| 322 | * of an unaligned access the alignment check would trigger before |
| 323 | * the one we are interested in. |
| 324 | */ |
| 325 | #define BPF_F_ANY_ALIGNMENT (1U << 1) |
| 326 | |
Jiong Wang | c240eff | 2019-05-24 23:25:16 +0100 | [diff] [blame] | 327 | /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. |
| 328 | * Verifier does sub-register def/use analysis and identifies instructions whose |
| 329 | * def only matters for low 32-bit, high 32-bit is never referenced later |
| 330 | * through implicit zero extension. Therefore verifier notifies JIT back-ends |
| 331 | * that it is safe to ignore clearing high 32-bit for these instructions. This |
| 332 | * saves some back-ends a lot of code-gen. However such optimization is not |
| 333 | * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends |
| 334 | * hence hasn't used verifier's analysis result. But, we really want to have a |
| 335 | * way to be able to verify the correctness of the described optimization on |
| 336 | * x86_64 on which testsuites are frequently exercised. |
| 337 | * |
| 338 | * So, this flag is introduced. Once it is set, verifier will randomize high |
| 339 | * 32-bit for those instructions who has been identified as safe to ignore them. |
| 340 | * Then, if verifier is not doing correct analysis, such randomization will |
| 341 | * regress tests to expose bugs. |
| 342 | */ |
| 343 | #define BPF_F_TEST_RND_HI32 (1U << 2) |
| 344 | |
Alexei Starovoitov | 10d274e | 2019-08-22 22:52:12 -0700 | [diff] [blame] | 345 | /* The verifier internal test flag. Behavior is undefined */ |
| 346 | #define BPF_F_TEST_STATE_FREQ (1U << 3) |
| 347 | |
Daniel Borkmann | d8eca5b | 2019-04-09 23:20:03 +0200 | [diff] [blame] | 348 | /* When BPF ldimm64's insn[0].src_reg != 0 then this can have |
| 349 | * two extensions: |
| 350 | * |
| 351 | * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE |
| 352 | * insn[0].imm: map fd map fd |
| 353 | * insn[1].imm: 0 offset into value |
| 354 | * insn[0].off: 0 0 |
| 355 | * insn[1].off: 0 0 |
| 356 | * ldimm64 rewrite: address of map address of map[0]+offset |
| 357 | * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE |
| 358 | */ |
Daniel Borkmann | f1a66f8 | 2015-03-01 12:31:43 +0100 | [diff] [blame] | 359 | #define BPF_PSEUDO_MAP_FD 1 |
Daniel Borkmann | d8eca5b | 2019-04-09 23:20:03 +0200 | [diff] [blame] | 360 | #define BPF_PSEUDO_MAP_VALUE 2 |
Daniel Borkmann | f1a66f8 | 2015-03-01 12:31:43 +0100 | [diff] [blame] | 361 | |
Alexei Starovoitov | cc8b0b9 | 2017-12-14 17:55:05 -0800 | [diff] [blame] | 362 | /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative |
| 363 | * offset to another bpf function |
| 364 | */ |
| 365 | #define BPF_PSEUDO_CALL 1 |
| 366 | |
Alexei Starovoitov | 3274f52 | 2014-11-13 17:36:44 -0800 | [diff] [blame] | 367 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 368 | enum { |
| 369 | BPF_ANY = 0, /* create new element or update existing */ |
| 370 | BPF_NOEXIST = 1, /* create new element if it didn't exist */ |
| 371 | BPF_EXIST = 2, /* update existing element */ |
| 372 | BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ |
| 373 | }; |
Alexei Starovoitov | 3274f52 | 2014-11-13 17:36:44 -0800 | [diff] [blame] | 374 | |
Martin KaFai Lau | 96eabe7 | 2017-08-18 11:28:00 -0700 | [diff] [blame] | 375 | /* flags for BPF_MAP_CREATE command */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 376 | enum { |
| 377 | BPF_F_NO_PREALLOC = (1U << 0), |
Martin KaFai Lau | 29ba732 | 2016-11-11 10:55:09 -0800 | [diff] [blame] | 378 | /* Instead of having one common LRU list in the |
Martin KaFai Lau | 8f84493 | 2016-11-11 10:55:10 -0800 | [diff] [blame] | 379 | * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list |
Martin KaFai Lau | 29ba732 | 2016-11-11 10:55:09 -0800 | [diff] [blame] | 380 | * which can scale and perform better. |
| 381 | * Note, the LRU nodes (including free nodes) cannot be moved |
| 382 | * across different LRU lists. |
| 383 | */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 384 | BPF_F_NO_COMMON_LRU = (1U << 1), |
Martin KaFai Lau | 96eabe7 | 2017-08-18 11:28:00 -0700 | [diff] [blame] | 385 | /* Specify numa node during map creation */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 386 | BPF_F_NUMA_NODE = (1U << 2), |
Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 387 | |
Daniel Borkmann | 591fe98 | 2019-04-09 23:20:05 +0200 | [diff] [blame] | 388 | /* Flags for accessing BPF object from syscall side. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 389 | BPF_F_RDONLY = (1U << 3), |
| 390 | BPF_F_WRONLY = (1U << 4), |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 391 | |
Song Liu | 615755a | 2018-03-14 10:23:21 -0700 | [diff] [blame] | 392 | /* Flag for stack_map, store build_id+offset instead of pointer */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 393 | BPF_F_STACK_BUILD_ID = (1U << 5), |
Song Liu | 615755a | 2018-03-14 10:23:21 -0700 | [diff] [blame] | 394 | |
Lorenz Bauer | 96b3b6c | 2018-11-16 11:41:08 +0000 | [diff] [blame] | 395 | /* Zero-initialize hash function seed. This should only be used for testing. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 396 | BPF_F_ZERO_SEED = (1U << 6), |
Lorenz Bauer | 96b3b6c | 2018-11-16 11:41:08 +0000 | [diff] [blame] | 397 | |
Daniel Borkmann | 591fe98 | 2019-04-09 23:20:05 +0200 | [diff] [blame] | 398 | /* Flags for accessing BPF object from program side. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 399 | BPF_F_RDONLY_PROG = (1U << 7), |
| 400 | BPF_F_WRONLY_PROG = (1U << 8), |
Daniel Borkmann | 591fe98 | 2019-04-09 23:20:05 +0200 | [diff] [blame] | 401 | |
Stanislav Fomichev | 8f51dfc | 2019-08-14 10:37:49 -0700 | [diff] [blame] | 402 | /* Clone map from listener for newly accepted socket */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 403 | BPF_F_CLONE = (1U << 9), |
Stanislav Fomichev | 8f51dfc | 2019-08-14 10:37:49 -0700 | [diff] [blame] | 404 | |
Andrii Nakryiko | fc97022 | 2019-11-17 09:28:04 -0800 | [diff] [blame] | 405 | /* Enable memory-mapping BPF map */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 406 | BPF_F_MMAPABLE = (1U << 10), |
| 407 | }; |
Andrii Nakryiko | fc97022 | 2019-11-17 09:28:04 -0800 | [diff] [blame] | 408 | |
Andrey Ignatov | f5bfcd9 | 2020-01-07 17:40:06 -0800 | [diff] [blame] | 409 | /* Flags for BPF_PROG_QUERY. */ |
| 410 | |
| 411 | /* Query effective (directly attached + inherited from ancestor cgroups) |
| 412 | * programs that will be executed for events within a cgroup. |
| 413 | * attach_flags with this flag are returned only for directly attached programs. |
| 414 | */ |
Lorenz Bauer | 2f18336 | 2018-11-16 11:41:09 +0000 | [diff] [blame] | 415 | #define BPF_F_QUERY_EFFECTIVE (1U << 0) |
| 416 | |
Song Liu | d46edd6 | 2020-04-30 00:15:04 -0700 | [diff] [blame] | 417 | /* type for BPF_ENABLE_STATS */ |
| 418 | enum bpf_stats_type { |
| 419 | /* enabled run_time_ns and run_cnt */ |
| 420 | BPF_STATS_RUN_TIME = 0, |
| 421 | }; |
| 422 | |
Song Liu | 615755a | 2018-03-14 10:23:21 -0700 | [diff] [blame] | 423 | enum bpf_stack_build_id_status { |
| 424 | /* user space need an empty entry to identify end of a trace */ |
| 425 | BPF_STACK_BUILD_ID_EMPTY = 0, |
| 426 | /* with valid build_id and offset */ |
| 427 | BPF_STACK_BUILD_ID_VALID = 1, |
| 428 | /* couldn't get build_id, fallback to ip */ |
| 429 | BPF_STACK_BUILD_ID_IP = 2, |
| 430 | }; |
| 431 | |
| 432 | #define BPF_BUILD_ID_SIZE 20 |
| 433 | struct bpf_stack_build_id { |
| 434 | __s32 status; |
| 435 | unsigned char build_id[BPF_BUILD_ID_SIZE]; |
| 436 | union { |
| 437 | __u64 offset; |
| 438 | __u64 ip; |
| 439 | }; |
| 440 | }; |
| 441 | |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 442 | #define BPF_OBJ_NAME_LEN 16U |
| 443 | |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 444 | union bpf_attr { |
| 445 | struct { /* anonymous struct used by BPF_MAP_CREATE command */ |
| 446 | __u32 map_type; /* one of enum bpf_map_type */ |
| 447 | __u32 key_size; /* size of key in bytes */ |
| 448 | __u32 value_size; /* size of value in bytes */ |
| 449 | __u32 max_entries; /* max number of entries in a map */ |
Martin KaFai Lau | 96eabe7 | 2017-08-18 11:28:00 -0700 | [diff] [blame] | 450 | __u32 map_flags; /* BPF_MAP_CREATE related |
| 451 | * flags defined above. |
| 452 | */ |
Martin KaFai Lau | 56f668d | 2017-03-22 10:00:33 -0700 | [diff] [blame] | 453 | __u32 inner_map_fd; /* fd pointing to the inner map */ |
Martin KaFai Lau | 96eabe7 | 2017-08-18 11:28:00 -0700 | [diff] [blame] | 454 | __u32 numa_node; /* numa node (effective only if |
| 455 | * BPF_F_NUMA_NODE is set). |
| 456 | */ |
Martin KaFai Lau | 067cae4 | 2017-10-05 21:52:12 -0700 | [diff] [blame] | 457 | char map_name[BPF_OBJ_NAME_LEN]; |
Jakub Kicinski | a388457 | 2018-01-11 20:29:09 -0800 | [diff] [blame] | 458 | __u32 map_ifindex; /* ifindex of netdev to create on */ |
Martin KaFai Lau | a26ca7c | 2018-04-18 15:56:03 -0700 | [diff] [blame] | 459 | __u32 btf_fd; /* fd pointing to a BTF type data */ |
Martin KaFai Lau | 9b2cf32 | 2018-05-22 14:57:21 -0700 | [diff] [blame] | 460 | __u32 btf_key_type_id; /* BTF type_id of the key */ |
| 461 | __u32 btf_value_type_id; /* BTF type_id of the value */ |
Martin KaFai Lau | 85d33df | 2020-01-08 16:35:05 -0800 | [diff] [blame] | 462 | __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- |
| 463 | * struct stored as the |
| 464 | * map value |
| 465 | */ |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 466 | }; |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 467 | |
| 468 | struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ |
| 469 | __u32 map_fd; |
| 470 | __aligned_u64 key; |
| 471 | union { |
| 472 | __aligned_u64 value; |
| 473 | __aligned_u64 next_key; |
| 474 | }; |
Alexei Starovoitov | 3274f52 | 2014-11-13 17:36:44 -0800 | [diff] [blame] | 475 | __u64 flags; |
Alexei Starovoitov | db20fd2 | 2014-09-26 00:16:59 -0700 | [diff] [blame] | 476 | }; |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 477 | |
Brian Vazquez | cb4d03a | 2020-01-15 10:43:01 -0800 | [diff] [blame] | 478 | struct { /* struct used by BPF_MAP_*_BATCH commands */ |
| 479 | __aligned_u64 in_batch; /* start batch, |
| 480 | * NULL to start from beginning |
| 481 | */ |
| 482 | __aligned_u64 out_batch; /* output: next start batch */ |
| 483 | __aligned_u64 keys; |
| 484 | __aligned_u64 values; |
| 485 | __u32 count; /* input/output: |
| 486 | * input: # of key/value |
| 487 | * elements |
| 488 | * output: # of filled elements |
| 489 | */ |
| 490 | __u32 map_fd; |
| 491 | __u64 elem_flags; |
| 492 | __u64 flags; |
| 493 | } batch; |
| 494 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 495 | struct { /* anonymous struct used by BPF_PROG_LOAD command */ |
| 496 | __u32 prog_type; /* one of enum bpf_prog_type */ |
| 497 | __u32 insn_cnt; |
| 498 | __aligned_u64 insns; |
| 499 | __aligned_u64 license; |
Alexei Starovoitov | cbd3570 | 2014-09-26 00:17:03 -0700 | [diff] [blame] | 500 | __u32 log_level; /* verbosity level of verifier */ |
| 501 | __u32 log_size; /* size of user buffer */ |
| 502 | __aligned_u64 log_buf; /* user supplied buffer */ |
Daniel Borkmann | 6c4fc20 | 2018-12-16 00:49:47 +0100 | [diff] [blame] | 503 | __u32 kern_version; /* not used */ |
David S. Miller | e07b98d | 2017-05-10 11:38:07 -0700 | [diff] [blame] | 504 | __u32 prog_flags; |
Martin KaFai Lau | 067cae4 | 2017-10-05 21:52:12 -0700 | [diff] [blame] | 505 | char prog_name[BPF_OBJ_NAME_LEN]; |
Jakub Kicinski | 1f6f4cb | 2017-11-20 15:21:53 -0800 | [diff] [blame] | 506 | __u32 prog_ifindex; /* ifindex of netdev to prep for */ |
Andrey Ignatov | 5e43f89 | 2018-03-30 15:08:00 -0700 | [diff] [blame] | 507 | /* For some prog types expected attach type must be known at |
| 508 | * load time to verify attach type specific parts of prog |
| 509 | * (context accesses, allowed helpers, etc). |
| 510 | */ |
| 511 | __u32 expected_attach_type; |
Yonghong Song | 838e969 | 2018-11-19 15:29:11 -0800 | [diff] [blame] | 512 | __u32 prog_btf_fd; /* fd pointing to BTF type data */ |
| 513 | __u32 func_info_rec_size; /* userspace bpf_func_info size */ |
| 514 | __aligned_u64 func_info; /* func info */ |
| 515 | __u32 func_info_cnt; /* number of bpf_func_info records */ |
Martin KaFai Lau | c454a46 | 2018-12-07 16:42:25 -0800 | [diff] [blame] | 516 | __u32 line_info_rec_size; /* userspace bpf_line_info size */ |
| 517 | __aligned_u64 line_info; /* line info */ |
| 518 | __u32 line_info_cnt; /* number of bpf_line_info records */ |
Alexei Starovoitov | ccfe29eb2 | 2019-10-15 20:24:58 -0700 | [diff] [blame] | 519 | __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ |
Alexei Starovoitov | 5b92a28 | 2019-11-14 10:57:17 -0800 | [diff] [blame] | 520 | __u32 attach_prog_fd; /* 0 to attach to vmlinux */ |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 521 | }; |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 522 | |
| 523 | struct { /* anonymous struct used by BPF_OBJ_* commands */ |
| 524 | __aligned_u64 pathname; |
| 525 | __u32 bpf_fd; |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 526 | __u32 file_flags; |
Daniel Borkmann | b219775 | 2015-10-29 14:58:09 +0100 | [diff] [blame] | 527 | }; |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 528 | |
| 529 | struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ |
| 530 | __u32 target_fd; /* container object to attach to */ |
| 531 | __u32 attach_bpf_fd; /* eBPF program to attach */ |
| 532 | __u32 attach_type; |
Alexei Starovoitov | 7f67763 | 2017-02-10 20:28:24 -0800 | [diff] [blame] | 533 | __u32 attach_flags; |
Andrey Ignatov | 7dd68b3 | 2019-12-18 23:44:35 -0800 | [diff] [blame] | 534 | __u32 replace_bpf_fd; /* previously attached eBPF |
| 535 | * program to replace if |
| 536 | * BPF_F_REPLACE is used |
| 537 | */ |
Daniel Mack | f432455 | 2016-11-23 16:52:27 +0100 | [diff] [blame] | 538 | }; |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 539 | |
| 540 | struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ |
| 541 | __u32 prog_fd; |
| 542 | __u32 retval; |
Lorenz Bauer | b5a36b1 | 2018-12-03 11:31:23 +0000 | [diff] [blame] | 543 | __u32 data_size_in; /* input: len of data_in */ |
| 544 | __u32 data_size_out; /* input/output: len of data_out |
| 545 | * returns ENOSPC if data_out |
| 546 | * is too small. |
| 547 | */ |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 548 | __aligned_u64 data_in; |
| 549 | __aligned_u64 data_out; |
| 550 | __u32 repeat; |
| 551 | __u32 duration; |
Stanislav Fomichev | b0b9395 | 2019-04-09 11:49:09 -0700 | [diff] [blame] | 552 | __u32 ctx_size_in; /* input: len of ctx_in */ |
| 553 | __u32 ctx_size_out; /* input/output: len of ctx_out |
| 554 | * returns ENOSPC if ctx_out |
| 555 | * is too small. |
| 556 | */ |
| 557 | __aligned_u64 ctx_in; |
| 558 | __aligned_u64 ctx_out; |
Alexei Starovoitov | 1cf1cae | 2017-03-30 21:45:38 -0700 | [diff] [blame] | 559 | } test; |
Martin KaFai Lau | 34ad558 | 2017-06-05 12:15:48 -0700 | [diff] [blame] | 560 | |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 561 | struct { /* anonymous struct used by BPF_*_GET_*_ID */ |
| 562 | union { |
| 563 | __u32 start_id; |
| 564 | __u32 prog_id; |
Martin KaFai Lau | bd5f5f4e | 2017-06-05 12:15:50 -0700 | [diff] [blame] | 565 | __u32 map_id; |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 566 | __u32 btf_id; |
Andrii Nakryiko | a3b80e1 | 2020-04-28 17:16:06 -0700 | [diff] [blame] | 567 | __u32 link_id; |
Martin KaFai Lau | b16d9aa | 2017-06-05 12:15:49 -0700 | [diff] [blame] | 568 | }; |
Martin KaFai Lau | 34ad558 | 2017-06-05 12:15:48 -0700 | [diff] [blame] | 569 | __u32 next_id; |
Chenbo Feng | 6e71b04 | 2017-10-18 13:00:22 -0700 | [diff] [blame] | 570 | __u32 open_flags; |
Martin KaFai Lau | 34ad558 | 2017-06-05 12:15:48 -0700 | [diff] [blame] | 571 | }; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 572 | |
| 573 | struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ |
| 574 | __u32 bpf_fd; |
| 575 | __u32 info_len; |
| 576 | __aligned_u64 info; |
| 577 | } info; |
Alexei Starovoitov | 468e2f6 | 2017-10-02 22:50:22 -0700 | [diff] [blame] | 578 | |
| 579 | struct { /* anonymous struct used by BPF_PROG_QUERY command */ |
| 580 | __u32 target_fd; /* container object to query */ |
| 581 | __u32 attach_type; |
| 582 | __u32 query_flags; |
| 583 | __u32 attach_flags; |
| 584 | __aligned_u64 prog_ids; |
| 585 | __u32 prog_cnt; |
| 586 | } query; |
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 587 | |
Andrii Nakryiko | af6eea5 | 2020-03-29 19:59:58 -0700 | [diff] [blame] | 588 | struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ |
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 589 | __u64 name; |
| 590 | __u32 prog_fd; |
| 591 | } raw_tracepoint; |
Martin KaFai Lau | f56a653 | 2018-04-18 15:56:01 -0700 | [diff] [blame] | 592 | |
| 593 | struct { /* anonymous struct for BPF_BTF_LOAD */ |
| 594 | __aligned_u64 btf; |
| 595 | __aligned_u64 btf_log_buf; |
| 596 | __u32 btf_size; |
| 597 | __u32 btf_log_size; |
| 598 | __u32 btf_log_level; |
| 599 | }; |
Yonghong Song | 41bdc4b | 2018-05-24 11:21:09 -0700 | [diff] [blame] | 600 | |
| 601 | struct { |
| 602 | __u32 pid; /* input: pid */ |
| 603 | __u32 fd; /* input: fd */ |
| 604 | __u32 flags; /* input: flags */ |
| 605 | __u32 buf_len; /* input/output: buf len */ |
| 606 | __aligned_u64 buf; /* input/output: |
| 607 | * tp_name for tracepoint |
| 608 | * symbol for kprobe |
| 609 | * filename for uprobe |
| 610 | */ |
| 611 | __u32 prog_id; /* output: prod_id */ |
| 612 | __u32 fd_type; /* output: BPF_FD_TYPE_* */ |
| 613 | __u64 probe_offset; /* output: probe_offset */ |
| 614 | __u64 probe_addr; /* output: probe_addr */ |
| 615 | } task_fd_query; |
Andrii Nakryiko | af6eea5 | 2020-03-29 19:59:58 -0700 | [diff] [blame] | 616 | |
| 617 | struct { /* struct used by BPF_LINK_CREATE command */ |
| 618 | __u32 prog_fd; /* eBPF program to attach */ |
Andrii Nakryiko | aa8d3a7 | 2020-07-21 23:45:57 -0700 | [diff] [blame] | 619 | union { |
| 620 | __u32 target_fd; /* object to attach to */ |
| 621 | __u32 target_ifindex; /* target ifindex */ |
| 622 | }; |
Andrii Nakryiko | af6eea5 | 2020-03-29 19:59:58 -0700 | [diff] [blame] | 623 | __u32 attach_type; /* attach type */ |
| 624 | __u32 flags; /* extra flags */ |
Yonghong Song | 5e7b302 | 2020-08-04 22:50:56 -0700 | [diff] [blame] | 625 | __aligned_u64 iter_info; /* extra bpf_iter_link_info */ |
| 626 | __u32 iter_info_len; /* iter_info length */ |
Andrii Nakryiko | af6eea5 | 2020-03-29 19:59:58 -0700 | [diff] [blame] | 627 | } link_create; |
Andrii Nakryiko | 0c991eb | 2020-03-29 19:59:59 -0700 | [diff] [blame] | 628 | |
| 629 | struct { /* struct used by BPF_LINK_UPDATE command */ |
| 630 | __u32 link_fd; /* link fd */ |
| 631 | /* new program fd to update link with */ |
| 632 | __u32 new_prog_fd; |
| 633 | __u32 flags; /* extra flags */ |
| 634 | /* expected link's program fd; is specified only if |
| 635 | * BPF_F_REPLACE flag is set in flags */ |
| 636 | __u32 old_prog_fd; |
| 637 | } link_update; |
| 638 | |
Andrii Nakryiko | 73b11c2a | 2020-07-31 11:28:26 -0700 | [diff] [blame] | 639 | struct { |
| 640 | __u32 link_fd; |
| 641 | } link_detach; |
| 642 | |
Song Liu | d46edd6 | 2020-04-30 00:15:04 -0700 | [diff] [blame] | 643 | struct { /* struct used by BPF_ENABLE_STATS command */ |
| 644 | __u32 type; |
| 645 | } enable_stats; |
| 646 | |
Yonghong Song | ac51d99 | 2020-05-09 10:59:05 -0700 | [diff] [blame] | 647 | struct { /* struct used by BPF_ITER_CREATE command */ |
| 648 | __u32 link_fd; |
| 649 | __u32 flags; |
| 650 | } iter_create; |
| 651 | |
Alexei Starovoitov | 99c55f7 | 2014-09-26 00:16:57 -0700 | [diff] [blame] | 652 | } __attribute__((aligned(8))); |
| 653 | |
Quentin Monnet | 56a092c | 2018-04-25 18:16:52 +0100 | [diff] [blame] | 654 | /* The description below is an attempt at providing documentation to eBPF |
| 655 | * developers about the multiple available eBPF helper functions. It can be |
| 656 | * parsed and used to produce a manual page. The workflow is the following, |
| 657 | * and requires the rst2man utility: |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 658 | * |
Quentin Monnet | 56a092c | 2018-04-25 18:16:52 +0100 | [diff] [blame] | 659 | * $ ./scripts/bpf_helpers_doc.py \ |
| 660 | * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst |
| 661 | * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 |
| 662 | * $ man /tmp/bpf-helpers.7 |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 663 | * |
Quentin Monnet | 56a092c | 2018-04-25 18:16:52 +0100 | [diff] [blame] | 664 | * Note that in order to produce this external documentation, some RST |
| 665 | * formatting is used in the descriptions to get "bold" and "italics" in |
| 666 | * manual pages. Also note that the few trailing white spaces are |
| 667 | * intentional, removing them would break paragraphs for rst2man. |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 668 | * |
Quentin Monnet | 56a092c | 2018-04-25 18:16:52 +0100 | [diff] [blame] | 669 | * Start of BPF helper function descriptions: |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 670 | * |
| 671 | * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) |
| 672 | * Description |
| 673 | * Perform a lookup in *map* for an entry associated to *key*. |
| 674 | * Return |
| 675 | * Map value associated to *key*, or **NULL** if no entry was |
| 676 | * found. |
| 677 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 678 | * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 679 | * Description |
| 680 | * Add or update the value of the entry associated to *key* in |
| 681 | * *map* with *value*. *flags* is one of: |
| 682 | * |
| 683 | * **BPF_NOEXIST** |
| 684 | * The entry for *key* must not exist in the map. |
| 685 | * **BPF_EXIST** |
| 686 | * The entry for *key* must already exist in the map. |
| 687 | * **BPF_ANY** |
| 688 | * No condition on the existence of the entry for *key*. |
| 689 | * |
| 690 | * Flag value **BPF_NOEXIST** cannot be used for maps of types |
| 691 | * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all |
| 692 | * elements always exist), the helper would return an error. |
| 693 | * Return |
| 694 | * 0 on success, or a negative error in case of failure. |
| 695 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 696 | * long bpf_map_delete_elem(struct bpf_map *map, const void *key) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 697 | * Description |
| 698 | * Delete entry with *key* from *map*. |
| 699 | * Return |
| 700 | * 0 on success, or a negative error in case of failure. |
| 701 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 702 | * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 703 | * Description |
| 704 | * For tracing programs, safely attempt to read *size* bytes from |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 705 | * kernel space address *unsafe_ptr* and store the data in *dst*. |
| 706 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 707 | * Generally, use **bpf_probe_read_user**\ () or |
| 708 | * **bpf_probe_read_kernel**\ () instead. |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 709 | * Return |
| 710 | * 0 on success, or a negative error in case of failure. |
| 711 | * |
| 712 | * u64 bpf_ktime_get_ns(void) |
| 713 | * Description |
| 714 | * Return the time elapsed since system boot, in nanoseconds. |
Maciej Żenczykowski | 71d1921 | 2020-04-26 09:15:25 -0700 | [diff] [blame] | 715 | * Does not include time the system was suspended. |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 716 | * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 717 | * Return |
| 718 | * Current *ktime*. |
| 719 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 720 | * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 721 | * Description |
| 722 | * This helper is a "printk()-like" facility for debugging. It |
| 723 | * prints a message defined by format *fmt* (of size *fmt_size*) |
| 724 | * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if |
| 725 | * available. It can take up to three additional **u64** |
| 726 | * arguments (as an eBPF helpers, the total number of arguments is |
| 727 | * limited to five). |
| 728 | * |
| 729 | * Each time the helper is called, it appends a line to the trace. |
Peter Wu | 55c33df | 2019-08-21 00:08:59 +0100 | [diff] [blame] | 730 | * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is |
| 731 | * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 732 | * The format of the trace is customizable, and the exact output |
| 733 | * one will get depends on the options set in |
| 734 | * *\/sys/kernel/debug/tracing/trace_options* (see also the |
| 735 | * *README* file under the same directory). However, it usually |
| 736 | * defaults to something like: |
| 737 | * |
| 738 | * :: |
| 739 | * |
| 740 | * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> |
| 741 | * |
| 742 | * In the above: |
| 743 | * |
| 744 | * * ``telnet`` is the name of the current task. |
| 745 | * * ``470`` is the PID of the current task. |
| 746 | * * ``001`` is the CPU number on which the task is |
| 747 | * running. |
| 748 | * * In ``.N..``, each character refers to a set of |
| 749 | * options (whether irqs are enabled, scheduling |
| 750 | * options, whether hard/softirqs are running, level of |
| 751 | * preempt_disabled respectively). **N** means that |
| 752 | * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** |
| 753 | * are set. |
| 754 | * * ``419421.045894`` is a timestamp. |
| 755 | * * ``0x00000001`` is a fake value used by BPF for the |
| 756 | * instruction pointer register. |
| 757 | * * ``<formatted msg>`` is the message formatted with |
| 758 | * *fmt*. |
| 759 | * |
| 760 | * The conversion specifiers supported by *fmt* are similar, but |
| 761 | * more limited than for printk(). They are **%d**, **%i**, |
| 762 | * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, |
| 763 | * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size |
| 764 | * of field, padding with zeroes, etc.) is available, and the |
| 765 | * helper will return **-EINVAL** (but print nothing) if it |
| 766 | * encounters an unknown specifier. |
| 767 | * |
| 768 | * Also, note that **bpf_trace_printk**\ () is slow, and should |
| 769 | * only be used for debugging purposes. For this reason, a notice |
Tobias Klauser | b16fc09 | 2020-08-21 15:36:42 +0200 | [diff] [blame] | 770 | * block (spanning several lines) is printed to kernel logs and |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 771 | * states that the helper should not be used "for production use" |
| 772 | * the first time this helper is used (or more precisely, when |
| 773 | * **trace_printk**\ () buffers are allocated). For passing values |
| 774 | * to user space, perf events should be preferred. |
| 775 | * Return |
| 776 | * The number of bytes written to the buffer, or a negative error |
| 777 | * in case of failure. |
| 778 | * |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 779 | * u32 bpf_get_prandom_u32(void) |
| 780 | * Description |
| 781 | * Get a pseudo-random number. |
| 782 | * |
| 783 | * From a security point of view, this helper uses its own |
| 784 | * pseudo-random internal state, and cannot be used to infer the |
| 785 | * seed of other random functions in the kernel. However, it is |
| 786 | * essential to note that the generator used by the helper is not |
| 787 | * cryptographically secure. |
| 788 | * Return |
| 789 | * A random 32-bit unsigned value. |
| 790 | * |
| 791 | * u32 bpf_get_smp_processor_id(void) |
| 792 | * Description |
| 793 | * Get the SMP (symmetric multiprocessing) processor id. Note that |
| 794 | * all programs run with preemption disabled, which means that the |
| 795 | * SMP processor id is stable during all the execution of the |
| 796 | * program. |
| 797 | * Return |
| 798 | * The SMP id of the processor running the program. |
| 799 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 800 | * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 801 | * Description |
| 802 | * Store *len* bytes from address *from* into the packet |
| 803 | * associated to *skb*, at *offset*. *flags* are a combination of |
| 804 | * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the |
| 805 | * checksum for the packet after storing the bytes) and |
| 806 | * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ |
| 807 | * **->swhash** and *skb*\ **->l4hash** to 0). |
| 808 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 809 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 810 | * packet buffer. Therefore, at load time, all checks on pointers |
| 811 | * previously done by the verifier are invalidated and must be |
| 812 | * performed again, if the helper is used in combination with |
| 813 | * direct packet access. |
| 814 | * Return |
| 815 | * 0 on success, or a negative error in case of failure. |
| 816 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 817 | * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 818 | * Description |
| 819 | * Recompute the layer 3 (e.g. IP) checksum for the packet |
| 820 | * associated to *skb*. Computation is incremental, so the helper |
| 821 | * must know the former value of the header field that was |
| 822 | * modified (*from*), the new value of this field (*to*), and the |
| 823 | * number of bytes (2 or 4) for this field, stored in *size*. |
| 824 | * Alternatively, it is possible to store the difference between |
| 825 | * the previous and the new values of the header field in *to*, by |
| 826 | * setting *from* and *size* to 0. For both methods, *offset* |
| 827 | * indicates the location of the IP checksum within the packet. |
| 828 | * |
| 829 | * This helper works in combination with **bpf_csum_diff**\ (), |
| 830 | * which does not update the checksum in-place, but offers more |
| 831 | * flexibility and can handle sizes larger than 2 or 4 for the |
| 832 | * checksum to update. |
| 833 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 834 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 835 | * packet buffer. Therefore, at load time, all checks on pointers |
| 836 | * previously done by the verifier are invalidated and must be |
| 837 | * performed again, if the helper is used in combination with |
| 838 | * direct packet access. |
| 839 | * Return |
| 840 | * 0 on success, or a negative error in case of failure. |
| 841 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 842 | * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 843 | * Description |
| 844 | * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the |
| 845 | * packet associated to *skb*. Computation is incremental, so the |
| 846 | * helper must know the former value of the header field that was |
| 847 | * modified (*from*), the new value of this field (*to*), and the |
| 848 | * number of bytes (2 or 4) for this field, stored on the lowest |
| 849 | * four bits of *flags*. Alternatively, it is possible to store |
| 850 | * the difference between the previous and the new values of the |
| 851 | * header field in *to*, by setting *from* and the four lowest |
| 852 | * bits of *flags* to 0. For both methods, *offset* indicates the |
| 853 | * location of the IP checksum within the packet. In addition to |
| 854 | * the size of the field, *flags* can be added (bitwise OR) actual |
| 855 | * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left |
| 856 | * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and |
| 857 | * for updates resulting in a null checksum the value is set to |
| 858 | * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates |
| 859 | * the checksum is to be computed against a pseudo-header. |
| 860 | * |
| 861 | * This helper works in combination with **bpf_csum_diff**\ (), |
| 862 | * which does not update the checksum in-place, but offers more |
| 863 | * flexibility and can handle sizes larger than 2 or 4 for the |
| 864 | * checksum to update. |
| 865 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 866 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 867 | * packet buffer. Therefore, at load time, all checks on pointers |
| 868 | * previously done by the verifier are invalidated and must be |
| 869 | * performed again, if the helper is used in combination with |
| 870 | * direct packet access. |
| 871 | * Return |
| 872 | * 0 on success, or a negative error in case of failure. |
| 873 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 874 | * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 875 | * Description |
| 876 | * This special helper is used to trigger a "tail call", or in |
| 877 | * other words, to jump into another eBPF program. The same stack |
| 878 | * frame is used (but values on stack and in registers for the |
| 879 | * caller are not accessible to the callee). This mechanism allows |
| 880 | * for program chaining, either for raising the maximum number of |
| 881 | * available eBPF instructions, or to execute given programs in |
| 882 | * conditional blocks. For security reasons, there is an upper |
| 883 | * limit to the number of successive tail calls that can be |
| 884 | * performed. |
| 885 | * |
| 886 | * Upon call of this helper, the program attempts to jump into a |
| 887 | * program referenced at index *index* in *prog_array_map*, a |
| 888 | * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes |
| 889 | * *ctx*, a pointer to the context. |
| 890 | * |
| 891 | * If the call succeeds, the kernel immediately runs the first |
| 892 | * instruction of the new program. This is not a function call, |
| 893 | * and it never returns to the previous program. If the call |
| 894 | * fails, then the helper has no effect, and the caller continues |
| 895 | * to run its subsequent instructions. A call can fail if the |
| 896 | * destination program for the jump does not exist (i.e. *index* |
| 897 | * is superior to the number of entries in *prog_array_map*), or |
| 898 | * if the maximum number of tail calls has been reached for this |
| 899 | * chain of programs. This limit is defined in the kernel by the |
| 900 | * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), |
| 901 | * which is currently set to 32. |
| 902 | * Return |
| 903 | * 0 on success, or a negative error in case of failure. |
| 904 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 905 | * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 906 | * Description |
| 907 | * Clone and redirect the packet associated to *skb* to another |
| 908 | * net device of index *ifindex*. Both ingress and egress |
| 909 | * interfaces can be used for redirection. The **BPF_F_INGRESS** |
| 910 | * value in *flags* is used to make the distinction (ingress path |
| 911 | * is selected if the flag is present, egress path otherwise). |
| 912 | * This is the only flag supported for now. |
| 913 | * |
| 914 | * In comparison with **bpf_redirect**\ () helper, |
| 915 | * **bpf_clone_redirect**\ () has the associated cost of |
| 916 | * duplicating the packet buffer, but this can be executed out of |
| 917 | * the eBPF program. Conversely, **bpf_redirect**\ () is more |
| 918 | * efficient, but it is handled through an action code where the |
| 919 | * redirection happens only after the eBPF program has returned. |
| 920 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 921 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | ad4a522 | 2018-04-25 18:16:53 +0100 | [diff] [blame] | 922 | * packet buffer. Therefore, at load time, all checks on pointers |
| 923 | * previously done by the verifier are invalidated and must be |
| 924 | * performed again, if the helper is used in combination with |
| 925 | * direct packet access. |
| 926 | * Return |
| 927 | * 0 on success, or a negative error in case of failure. |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 928 | * |
| 929 | * u64 bpf_get_current_pid_tgid(void) |
| 930 | * Return |
| 931 | * A 64-bit integer containing the current tgid and pid, and |
| 932 | * created as such: |
| 933 | * *current_task*\ **->tgid << 32 \|** |
| 934 | * *current_task*\ **->pid**. |
| 935 | * |
| 936 | * u64 bpf_get_current_uid_gid(void) |
| 937 | * Return |
| 938 | * A 64-bit integer containing the current GID and UID, and |
| 939 | * created as such: *current_gid* **<< 32 \|** *current_uid*. |
| 940 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 941 | * long bpf_get_current_comm(void *buf, u32 size_of_buf) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 942 | * Description |
| 943 | * Copy the **comm** attribute of the current task into *buf* of |
| 944 | * *size_of_buf*. The **comm** attribute contains the name of |
| 945 | * the executable (excluding the path) for the current task. The |
| 946 | * *size_of_buf* must be strictly positive. On success, the |
| 947 | * helper makes sure that the *buf* is NUL-terminated. On failure, |
| 948 | * it is filled with zeroes. |
| 949 | * Return |
| 950 | * 0 on success, or a negative error in case of failure. |
| 951 | * |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 952 | * u32 bpf_get_cgroup_classid(struct sk_buff *skb) |
| 953 | * Description |
| 954 | * Retrieve the classid for the current task, i.e. for the net_cls |
| 955 | * cgroup to which *skb* belongs. |
| 956 | * |
| 957 | * This helper can be used on TC egress path, but not on ingress. |
| 958 | * |
| 959 | * The net_cls cgroup provides an interface to tag network packets |
| 960 | * based on a user-provided identifier for all traffic coming from |
| 961 | * the tasks belonging to the related cgroup. See also the related |
| 962 | * kernel documentation, available from the Linux sources in file |
Mauro Carvalho Chehab | da82c92 | 2019-06-27 13:08:35 -0300 | [diff] [blame] | 963 | * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 964 | * |
| 965 | * The Linux kernel has two versions for cgroups: there are |
| 966 | * cgroups v1 and cgroups v2. Both are available to users, who can |
| 967 | * use a mixture of them, but note that the net_cls cgroup is for |
| 968 | * cgroup v1 only. This makes it incompatible with BPF programs |
| 969 | * run on cgroups, which is a cgroup-v2-only feature (a socket can |
| 970 | * only hold data for one version of cgroups at a time). |
| 971 | * |
| 972 | * This helper is only available is the kernel was compiled with |
| 973 | * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to |
| 974 | * "**y**" or to "**m**". |
| 975 | * Return |
| 976 | * The classid, or 0 for the default unconfigured classid. |
| 977 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 978 | * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 979 | * Description |
| 980 | * Push a *vlan_tci* (VLAN tag control information) of protocol |
| 981 | * *vlan_proto* to the packet associated to *skb*, then update |
| 982 | * the checksum. Note that if *vlan_proto* is different from |
| 983 | * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to |
| 984 | * be **ETH_P_8021Q**. |
| 985 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 986 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 987 | * packet buffer. Therefore, at load time, all checks on pointers |
| 988 | * previously done by the verifier are invalidated and must be |
| 989 | * performed again, if the helper is used in combination with |
| 990 | * direct packet access. |
| 991 | * Return |
| 992 | * 0 on success, or a negative error in case of failure. |
| 993 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 994 | * long bpf_skb_vlan_pop(struct sk_buff *skb) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 995 | * Description |
| 996 | * Pop a VLAN header from the packet associated to *skb*. |
| 997 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 998 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 999 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1000 | * previously done by the verifier are invalidated and must be |
| 1001 | * performed again, if the helper is used in combination with |
| 1002 | * direct packet access. |
| 1003 | * Return |
| 1004 | * 0 on success, or a negative error in case of failure. |
| 1005 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1006 | * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1007 | * Description |
| 1008 | * Get tunnel metadata. This helper takes a pointer *key* to an |
| 1009 | * empty **struct bpf_tunnel_key** of **size**, that will be |
| 1010 | * filled with tunnel metadata for the packet associated to *skb*. |
| 1011 | * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which |
| 1012 | * indicates that the tunnel is based on IPv6 protocol instead of |
| 1013 | * IPv4. |
| 1014 | * |
| 1015 | * The **struct bpf_tunnel_key** is an object that generalizes the |
| 1016 | * principal parameters used by various tunneling protocols into a |
| 1017 | * single struct. This way, it can be used to easily make a |
| 1018 | * decision based on the contents of the encapsulation header, |
| 1019 | * "summarized" in this struct. In particular, it holds the IP |
| 1020 | * address of the remote end (IPv4 or IPv6, depending on the case) |
| 1021 | * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, |
| 1022 | * this struct exposes the *key*\ **->tunnel_id**, which is |
| 1023 | * generally mapped to a VNI (Virtual Network Identifier), making |
| 1024 | * it programmable together with the **bpf_skb_set_tunnel_key**\ |
| 1025 | * () helper. |
| 1026 | * |
| 1027 | * Let's imagine that the following code is part of a program |
| 1028 | * attached to the TC ingress interface, on one end of a GRE |
| 1029 | * tunnel, and is supposed to filter out all messages coming from |
| 1030 | * remote ends with IPv4 address other than 10.0.0.1: |
| 1031 | * |
| 1032 | * :: |
| 1033 | * |
| 1034 | * int ret; |
| 1035 | * struct bpf_tunnel_key key = {}; |
Tobias Klauser | b16fc09 | 2020-08-21 15:36:42 +0200 | [diff] [blame] | 1036 | * |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1037 | * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); |
| 1038 | * if (ret < 0) |
| 1039 | * return TC_ACT_SHOT; // drop packet |
Tobias Klauser | b16fc09 | 2020-08-21 15:36:42 +0200 | [diff] [blame] | 1040 | * |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1041 | * if (key.remote_ipv4 != 0x0a000001) |
| 1042 | * return TC_ACT_SHOT; // drop packet |
Tobias Klauser | b16fc09 | 2020-08-21 15:36:42 +0200 | [diff] [blame] | 1043 | * |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1044 | * return TC_ACT_OK; // accept packet |
| 1045 | * |
| 1046 | * This interface can also be used with all encapsulation devices |
| 1047 | * that can operate in "collect metadata" mode: instead of having |
| 1048 | * one network device per specific configuration, the "collect |
| 1049 | * metadata" mode only requires a single device where the |
| 1050 | * configuration can be extracted from this helper. |
| 1051 | * |
| 1052 | * This can be used together with various tunnels such as VXLan, |
| 1053 | * Geneve, GRE or IP in IP (IPIP). |
| 1054 | * Return |
| 1055 | * 0 on success, or a negative error in case of failure. |
| 1056 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1057 | * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1058 | * Description |
| 1059 | * Populate tunnel metadata for packet associated to *skb.* The |
| 1060 | * tunnel metadata is set to the contents of *key*, of *size*. The |
| 1061 | * *flags* can be set to a combination of the following values: |
| 1062 | * |
| 1063 | * **BPF_F_TUNINFO_IPV6** |
| 1064 | * Indicate that the tunnel is based on IPv6 protocol |
| 1065 | * instead of IPv4. |
| 1066 | * **BPF_F_ZERO_CSUM_TX** |
| 1067 | * For IPv4 packets, add a flag to tunnel metadata |
| 1068 | * indicating that checksum computation should be skipped |
| 1069 | * and checksum set to zeroes. |
| 1070 | * **BPF_F_DONT_FRAGMENT** |
| 1071 | * Add a flag to tunnel metadata indicating that the |
| 1072 | * packet should not be fragmented. |
| 1073 | * **BPF_F_SEQ_NUMBER** |
| 1074 | * Add a flag to tunnel metadata indicating that a |
| 1075 | * sequence number should be added to tunnel header before |
| 1076 | * sending the packet. This flag was added for GRE |
| 1077 | * encapsulation, but might be used with other protocols |
| 1078 | * as well in the future. |
| 1079 | * |
| 1080 | * Here is a typical usage on the transmit path: |
| 1081 | * |
| 1082 | * :: |
| 1083 | * |
| 1084 | * struct bpf_tunnel_key key; |
| 1085 | * populate key ... |
| 1086 | * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); |
| 1087 | * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); |
| 1088 | * |
| 1089 | * See also the description of the **bpf_skb_get_tunnel_key**\ () |
| 1090 | * helper for additional information. |
| 1091 | * Return |
| 1092 | * 0 on success, or a negative error in case of failure. |
| 1093 | * |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1094 | * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) |
| 1095 | * Description |
| 1096 | * Read the value of a perf event counter. This helper relies on a |
| 1097 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of |
| 1098 | * the perf event counter is selected when *map* is updated with |
| 1099 | * perf event file descriptors. The *map* is an array whose size |
| 1100 | * is the number of available CPUs, and each cell contains a value |
| 1101 | * relative to one CPU. The value to retrieve is indicated by |
| 1102 | * *flags*, that contains the index of the CPU to look up, masked |
| 1103 | * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to |
| 1104 | * **BPF_F_CURRENT_CPU** to indicate that the value for the |
| 1105 | * current CPU should be retrieved. |
| 1106 | * |
| 1107 | * Note that before Linux 4.13, only hardware perf event can be |
| 1108 | * retrieved. |
| 1109 | * |
| 1110 | * Also, be aware that the newer helper |
| 1111 | * **bpf_perf_event_read_value**\ () is recommended over |
Quentin Monnet | 3bd5a09 | 2018-04-30 11:39:03 +0100 | [diff] [blame] | 1112 | * **bpf_perf_event_read**\ () in general. The latter has some ABI |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1113 | * quirks where error and counter value are used as a return code |
| 1114 | * (which is wrong to do since ranges may overlap). This issue is |
Quentin Monnet | 3bd5a09 | 2018-04-30 11:39:03 +0100 | [diff] [blame] | 1115 | * fixed with **bpf_perf_event_read_value**\ (), which at the same |
| 1116 | * time provides more features over the **bpf_perf_event_read**\ |
| 1117 | * () interface. Please refer to the description of |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1118 | * **bpf_perf_event_read_value**\ () for details. |
| 1119 | * Return |
| 1120 | * The value of the perf event counter read from the map, or a |
| 1121 | * negative error code in case of failure. |
| 1122 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1123 | * long bpf_redirect(u32 ifindex, u64 flags) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1124 | * Description |
| 1125 | * Redirect the packet to another net device of index *ifindex*. |
| 1126 | * This helper is somewhat similar to **bpf_clone_redirect**\ |
| 1127 | * (), except that the packet is not cloned, which provides |
| 1128 | * increased performance. |
| 1129 | * |
| 1130 | * Except for XDP, both ingress and egress interfaces can be used |
| 1131 | * for redirection. The **BPF_F_INGRESS** value in *flags* is used |
| 1132 | * to make the distinction (ingress path is selected if the flag |
| 1133 | * is present, egress path otherwise). Currently, XDP only |
| 1134 | * supports redirection to the egress interface, and accepts no |
| 1135 | * flag at all. |
| 1136 | * |
Toke Høiland-Jørgensen | f25975f | 2020-02-18 14:03:34 +0100 | [diff] [blame] | 1137 | * The same effect can also be attained with the more generic |
| 1138 | * **bpf_redirect_map**\ (), which uses a BPF map to store the |
| 1139 | * redirect target instead of providing it directly to the helper. |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1140 | * Return |
| 1141 | * For XDP, the helper returns **XDP_REDIRECT** on success or |
| 1142 | * **XDP_ABORTED** on error. For other program types, the values |
| 1143 | * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on |
| 1144 | * error. |
| 1145 | * |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1146 | * u32 bpf_get_route_realm(struct sk_buff *skb) |
| 1147 | * Description |
| 1148 | * Retrieve the realm or the route, that is to say the |
| 1149 | * **tclassid** field of the destination for the *skb*. The |
Tobias Klauser | b16fc09 | 2020-08-21 15:36:42 +0200 | [diff] [blame] | 1150 | * identifier retrieved is a user-provided tag, similar to the |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1151 | * one used with the net_cls cgroup (see description for |
| 1152 | * **bpf_get_cgroup_classid**\ () helper), but here this tag is |
| 1153 | * held by a route (a destination entry), not by a task. |
| 1154 | * |
| 1155 | * Retrieving this identifier works with the clsact TC egress hook |
| 1156 | * (see also **tc-bpf(8)**), or alternatively on conventional |
| 1157 | * classful egress qdiscs, but not on TC ingress path. In case of |
| 1158 | * clsact TC egress hook, this has the advantage that, internally, |
| 1159 | * the destination entry has not been dropped yet in the transmit |
| 1160 | * path. Therefore, the destination entry does not need to be |
| 1161 | * artificially held via **netif_keep_dst**\ () for a classful |
| 1162 | * qdisc until the *skb* is freed. |
| 1163 | * |
| 1164 | * This helper is available only if the kernel was compiled with |
| 1165 | * **CONFIG_IP_ROUTE_CLASSID** configuration option. |
| 1166 | * Return |
| 1167 | * The realm of the route for the packet associated to *skb*, or 0 |
| 1168 | * if none was found. |
| 1169 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1170 | * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1171 | * Description |
| 1172 | * Write raw *data* blob into a special BPF perf event held by |
| 1173 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf |
| 1174 | * event must have the following attributes: **PERF_SAMPLE_RAW** |
| 1175 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and |
| 1176 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. |
| 1177 | * |
| 1178 | * The *flags* are used to indicate the index in *map* for which |
| 1179 | * the value must be put, masked with **BPF_F_INDEX_MASK**. |
| 1180 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** |
| 1181 | * to indicate that the index of the current CPU core should be |
| 1182 | * used. |
| 1183 | * |
| 1184 | * The value to write, of *size*, is passed through eBPF stack and |
| 1185 | * pointed by *data*. |
| 1186 | * |
| 1187 | * The context of the program *ctx* needs also be passed to the |
| 1188 | * helper. |
| 1189 | * |
| 1190 | * On user space, a program willing to read the values needs to |
| 1191 | * call **perf_event_open**\ () on the perf event (either for |
| 1192 | * one or for all CPUs) and to store the file descriptor into the |
| 1193 | * *map*. This must be done before the eBPF program can send data |
| 1194 | * into it. An example is available in file |
| 1195 | * *samples/bpf/trace_output_user.c* in the Linux kernel source |
| 1196 | * tree (the eBPF program counterpart is in |
| 1197 | * *samples/bpf/trace_output_kern.c*). |
| 1198 | * |
| 1199 | * **bpf_perf_event_output**\ () achieves better performance |
| 1200 | * than **bpf_trace_printk**\ () for sharing data with user |
| 1201 | * space, and is much better suitable for streaming data from eBPF |
| 1202 | * programs. |
| 1203 | * |
| 1204 | * Note that this helper is not restricted to tracing use cases |
| 1205 | * and can be used with programs attached to TC or XDP as well, |
| 1206 | * where it allows for passing data to user space listeners. Data |
| 1207 | * can be: |
| 1208 | * |
| 1209 | * * Only custom structs, |
| 1210 | * * Only the packet payload, or |
| 1211 | * * A combination of both. |
| 1212 | * Return |
| 1213 | * 0 on success, or a negative error in case of failure. |
| 1214 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1215 | * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1216 | * Description |
| 1217 | * This helper was provided as an easy way to load data from a |
| 1218 | * packet. It can be used to load *len* bytes from *offset* from |
| 1219 | * the packet associated to *skb*, into the buffer pointed by |
| 1220 | * *to*. |
| 1221 | * |
| 1222 | * Since Linux 4.7, usage of this helper has mostly been replaced |
| 1223 | * by "direct packet access", enabling packet data to be |
| 1224 | * manipulated with *skb*\ **->data** and *skb*\ **->data_end** |
| 1225 | * pointing respectively to the first byte of packet data and to |
| 1226 | * the byte after the last byte of packet data. However, it |
| 1227 | * remains useful if one wishes to read large quantities of data |
| 1228 | * at once from a packet into the eBPF stack. |
| 1229 | * Return |
| 1230 | * 0 on success, or a negative error in case of failure. |
| 1231 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1232 | * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1233 | * Description |
| 1234 | * Walk a user or a kernel stack and return its id. To achieve |
| 1235 | * this, the helper needs *ctx*, which is a pointer to the context |
| 1236 | * on which the tracing program is executed, and a pointer to a |
| 1237 | * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. |
| 1238 | * |
| 1239 | * The last argument, *flags*, holds the number of stack frames to |
| 1240 | * skip (from 0 to 255), masked with |
| 1241 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set |
| 1242 | * a combination of the following flags: |
| 1243 | * |
| 1244 | * **BPF_F_USER_STACK** |
| 1245 | * Collect a user space stack instead of a kernel stack. |
| 1246 | * **BPF_F_FAST_STACK_CMP** |
| 1247 | * Compare stacks by hash only. |
| 1248 | * **BPF_F_REUSE_STACKID** |
| 1249 | * If two different stacks hash into the same *stackid*, |
| 1250 | * discard the old one. |
| 1251 | * |
| 1252 | * The stack id retrieved is a 32 bit long integer handle which |
| 1253 | * can be further combined with other data (including other stack |
| 1254 | * ids) and used as a key into maps. This can be useful for |
| 1255 | * generating a variety of graphs (such as flame graphs or off-cpu |
| 1256 | * graphs). |
| 1257 | * |
| 1258 | * For walking a stack, this helper is an improvement over |
| 1259 | * **bpf_probe_read**\ (), which can be used with unrolled loops |
| 1260 | * but is not efficient and consumes a lot of eBPF instructions. |
| 1261 | * Instead, **bpf_get_stackid**\ () can collect up to |
| 1262 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that |
| 1263 | * this limit can be controlled with the **sysctl** program, and |
| 1264 | * that it should be manually increased in order to profile long |
| 1265 | * user stacks (such as stacks for Java programs). To do so, use: |
| 1266 | * |
| 1267 | * :: |
| 1268 | * |
| 1269 | * # sysctl kernel.perf_event_max_stack=<new value> |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1270 | * Return |
| 1271 | * The positive or null stack id on success, or a negative error |
| 1272 | * in case of failure. |
| 1273 | * |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1274 | * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) |
| 1275 | * Description |
| 1276 | * Compute a checksum difference, from the raw buffer pointed by |
| 1277 | * *from*, of length *from_size* (that must be a multiple of 4), |
| 1278 | * towards the raw buffer pointed by *to*, of size *to_size* |
| 1279 | * (same remark). An optional *seed* can be added to the value |
| 1280 | * (this can be cascaded, the seed may come from a previous call |
| 1281 | * to the helper). |
| 1282 | * |
| 1283 | * This is flexible enough to be used in several ways: |
| 1284 | * |
| 1285 | * * With *from_size* == 0, *to_size* > 0 and *seed* set to |
| 1286 | * checksum, it can be used when pushing new data. |
| 1287 | * * With *from_size* > 0, *to_size* == 0 and *seed* set to |
| 1288 | * checksum, it can be used when removing data from a packet. |
| 1289 | * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it |
| 1290 | * can be used to compute a diff. Note that *from_size* and |
| 1291 | * *to_size* do not need to be equal. |
| 1292 | * |
| 1293 | * This helper can be used in combination with |
| 1294 | * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to |
| 1295 | * which one can feed in the difference computed with |
| 1296 | * **bpf_csum_diff**\ (). |
| 1297 | * Return |
| 1298 | * The checksum result, or a negative error code in case of |
| 1299 | * failure. |
| 1300 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1301 | * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1302 | * Description |
| 1303 | * Retrieve tunnel options metadata for the packet associated to |
| 1304 | * *skb*, and store the raw tunnel option data to the buffer *opt* |
| 1305 | * of *size*. |
| 1306 | * |
| 1307 | * This helper can be used with encapsulation devices that can |
| 1308 | * operate in "collect metadata" mode (please refer to the related |
| 1309 | * note in the description of **bpf_skb_get_tunnel_key**\ () for |
| 1310 | * more details). A particular example where this can be used is |
| 1311 | * in combination with the Geneve encapsulation protocol, where it |
| 1312 | * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) |
| 1313 | * and retrieving arbitrary TLVs (Type-Length-Value headers) from |
| 1314 | * the eBPF program. This allows for full customization of these |
| 1315 | * headers. |
| 1316 | * Return |
| 1317 | * The size of the option data retrieved. |
| 1318 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1319 | * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1320 | * Description |
| 1321 | * Set tunnel options metadata for the packet associated to *skb* |
| 1322 | * to the option data contained in the raw buffer *opt* of *size*. |
| 1323 | * |
| 1324 | * See also the description of the **bpf_skb_get_tunnel_opt**\ () |
| 1325 | * helper for additional information. |
| 1326 | * Return |
| 1327 | * 0 on success, or a negative error in case of failure. |
| 1328 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1329 | * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1330 | * Description |
| 1331 | * Change the protocol of the *skb* to *proto*. Currently |
| 1332 | * supported are transition from IPv4 to IPv6, and from IPv6 to |
| 1333 | * IPv4. The helper takes care of the groundwork for the |
| 1334 | * transition, including resizing the socket buffer. The eBPF |
| 1335 | * program is expected to fill the new headers, if any, via |
| 1336 | * **skb_store_bytes**\ () and to recompute the checksums with |
| 1337 | * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ |
| 1338 | * (). The main case for this helper is to perform NAT64 |
| 1339 | * operations out of an eBPF program. |
| 1340 | * |
| 1341 | * Internally, the GSO type is marked as dodgy so that headers are |
| 1342 | * checked and segments are recalculated by the GSO/GRO engine. |
| 1343 | * The size for GSO target is adapted as well. |
| 1344 | * |
| 1345 | * All values for *flags* are reserved for future usage, and must |
| 1346 | * be left at zero. |
| 1347 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1348 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1349 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1350 | * previously done by the verifier are invalidated and must be |
| 1351 | * performed again, if the helper is used in combination with |
| 1352 | * direct packet access. |
| 1353 | * Return |
| 1354 | * 0 on success, or a negative error in case of failure. |
| 1355 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1356 | * long bpf_skb_change_type(struct sk_buff *skb, u32 type) |
Quentin Monnet | 1fdd08b | 2018-04-25 18:16:55 +0100 | [diff] [blame] | 1357 | * Description |
| 1358 | * Change the packet type for the packet associated to *skb*. This |
| 1359 | * comes down to setting *skb*\ **->pkt_type** to *type*, except |
| 1360 | * the eBPF program does not have a write access to *skb*\ |
| 1361 | * **->pkt_type** beside this helper. Using a helper here allows |
| 1362 | * for graceful handling of errors. |
| 1363 | * |
| 1364 | * The major use case is to change incoming *skb*s to |
| 1365 | * **PACKET_HOST** in a programmatic way instead of having to |
| 1366 | * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for |
| 1367 | * example. |
| 1368 | * |
| 1369 | * Note that *type* only allows certain values. At this time, they |
| 1370 | * are: |
| 1371 | * |
| 1372 | * **PACKET_HOST** |
| 1373 | * Packet is for us. |
| 1374 | * **PACKET_BROADCAST** |
| 1375 | * Send packet to all. |
| 1376 | * **PACKET_MULTICAST** |
| 1377 | * Send packet to group. |
| 1378 | * **PACKET_OTHERHOST** |
| 1379 | * Send packet to someone else. |
| 1380 | * Return |
| 1381 | * 0 on success, or a negative error in case of failure. |
| 1382 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1383 | * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1384 | * Description |
| 1385 | * Check whether *skb* is a descendant of the cgroup2 held by |
| 1386 | * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. |
| 1387 | * Return |
| 1388 | * The return value depends on the result of the test, and can be: |
| 1389 | * |
| 1390 | * * 0, if the *skb* failed the cgroup2 descendant test. |
| 1391 | * * 1, if the *skb* succeeded the cgroup2 descendant test. |
| 1392 | * * A negative error code, if an error occurred. |
| 1393 | * |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1394 | * u32 bpf_get_hash_recalc(struct sk_buff *skb) |
| 1395 | * Description |
| 1396 | * Retrieve the hash of the packet, *skb*\ **->hash**. If it is |
| 1397 | * not set, in particular if the hash was cleared due to mangling, |
| 1398 | * recompute this hash. Later accesses to the hash can be done |
| 1399 | * directly with *skb*\ **->hash**. |
| 1400 | * |
| 1401 | * Calling **bpf_set_hash_invalid**\ (), changing a packet |
| 1402 | * prototype with **bpf_skb_change_proto**\ (), or calling |
| 1403 | * **bpf_skb_store_bytes**\ () with the |
| 1404 | * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear |
| 1405 | * the hash and to trigger a new computation for the next call to |
| 1406 | * **bpf_get_hash_recalc**\ (). |
| 1407 | * Return |
| 1408 | * The 32-bit hash. |
| 1409 | * |
Quentin Monnet | c456dec | 2018-04-25 18:16:54 +0100 | [diff] [blame] | 1410 | * u64 bpf_get_current_task(void) |
| 1411 | * Return |
| 1412 | * A pointer to the current task struct. |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1413 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1414 | * long bpf_probe_write_user(void *dst, const void *src, u32 len) |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1415 | * Description |
| 1416 | * Attempt in a safe way to write *len* bytes from the buffer |
| 1417 | * *src* to *dst* in memory. It only works for threads that are in |
| 1418 | * user context, and *dst* must be a valid user space address. |
| 1419 | * |
| 1420 | * This helper should not be used to implement any kind of |
| 1421 | * security mechanism because of TOC-TOU attacks, but rather to |
| 1422 | * debug, divert, and manipulate execution of semi-cooperative |
| 1423 | * processes. |
| 1424 | * |
| 1425 | * Keep in mind that this feature is meant for experiments, and it |
| 1426 | * has a risk of crashing the system and running programs. |
| 1427 | * Therefore, when an eBPF program using this helper is attached, |
| 1428 | * a warning including PID and process name is printed to kernel |
| 1429 | * logs. |
| 1430 | * Return |
| 1431 | * 0 on success, or a negative error in case of failure. |
| 1432 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1433 | * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1434 | * Description |
| 1435 | * Check whether the probe is being run is the context of a given |
| 1436 | * subset of the cgroup2 hierarchy. The cgroup2 to test is held by |
| 1437 | * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. |
| 1438 | * Return |
| 1439 | * The return value depends on the result of the test, and can be: |
| 1440 | * |
| 1441 | * * 0, if the *skb* task belongs to the cgroup2. |
| 1442 | * * 1, if the *skb* task does not belong to the cgroup2. |
| 1443 | * * A negative error code, if an error occurred. |
| 1444 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1445 | * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1446 | * Description |
| 1447 | * Resize (trim or grow) the packet associated to *skb* to the |
| 1448 | * new *len*. The *flags* are reserved for future usage, and must |
| 1449 | * be left at zero. |
| 1450 | * |
| 1451 | * The basic idea is that the helper performs the needed work to |
| 1452 | * change the size of the packet, then the eBPF program rewrites |
| 1453 | * the rest via helpers like **bpf_skb_store_bytes**\ (), |
| 1454 | * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ () |
| 1455 | * and others. This helper is a slow path utility intended for |
| 1456 | * replies with control messages. And because it is targeted for |
| 1457 | * slow path, the helper itself can afford to be slow: it |
| 1458 | * implicitly linearizes, unclones and drops offloads from the |
| 1459 | * *skb*. |
| 1460 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1461 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1462 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1463 | * previously done by the verifier are invalidated and must be |
| 1464 | * performed again, if the helper is used in combination with |
| 1465 | * direct packet access. |
| 1466 | * Return |
| 1467 | * 0 on success, or a negative error in case of failure. |
| 1468 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1469 | * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1470 | * Description |
| 1471 | * Pull in non-linear data in case the *skb* is non-linear and not |
| 1472 | * all of *len* are part of the linear section. Make *len* bytes |
| 1473 | * from *skb* readable and writable. If a zero value is passed for |
| 1474 | * *len*, then the whole length of the *skb* is pulled. |
| 1475 | * |
| 1476 | * This helper is only needed for reading and writing with direct |
| 1477 | * packet access. |
| 1478 | * |
| 1479 | * For direct packet access, testing that offsets to access |
| 1480 | * are within packet boundaries (test on *skb*\ **->data_end**) is |
| 1481 | * susceptible to fail if offsets are invalid, or if the requested |
| 1482 | * data is in non-linear parts of the *skb*. On failure the |
| 1483 | * program can just bail out, or in the case of a non-linear |
| 1484 | * buffer, use a helper to make the data available. The |
| 1485 | * **bpf_skb_load_bytes**\ () helper is a first solution to access |
| 1486 | * the data. Another one consists in using **bpf_skb_pull_data** |
| 1487 | * to pull in once the non-linear parts, then retesting and |
| 1488 | * eventually access the data. |
| 1489 | * |
| 1490 | * At the same time, this also makes sure the *skb* is uncloned, |
| 1491 | * which is a necessary condition for direct write. As this needs |
| 1492 | * to be an invariant for the write part only, the verifier |
| 1493 | * detects writes and adds a prologue that is calling |
| 1494 | * **bpf_skb_pull_data()** to effectively unclone the *skb* from |
| 1495 | * the very beginning in case it is indeed cloned. |
| 1496 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1497 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1498 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1499 | * previously done by the verifier are invalidated and must be |
| 1500 | * performed again, if the helper is used in combination with |
| 1501 | * direct packet access. |
| 1502 | * Return |
| 1503 | * 0 on success, or a negative error in case of failure. |
| 1504 | * |
| 1505 | * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) |
| 1506 | * Description |
| 1507 | * Add the checksum *csum* into *skb*\ **->csum** in case the |
| 1508 | * driver has supplied a checksum for the entire packet into that |
| 1509 | * field. Return an error otherwise. This helper is intended to be |
| 1510 | * used in combination with **bpf_csum_diff**\ (), in particular |
| 1511 | * when the checksum needs to be updated after data has been |
| 1512 | * written into the packet through direct packet access. |
| 1513 | * Return |
| 1514 | * The checksum on success, or a negative error code in case of |
| 1515 | * failure. |
| 1516 | * |
| 1517 | * void bpf_set_hash_invalid(struct sk_buff *skb) |
| 1518 | * Description |
| 1519 | * Invalidate the current *skb*\ **->hash**. It can be used after |
| 1520 | * mangling on headers through direct packet access, in order to |
| 1521 | * indicate that the hash is outdated and to trigger a |
| 1522 | * recalculation the next time the kernel tries to access this |
| 1523 | * hash or when the **bpf_get_hash_recalc**\ () helper is called. |
| 1524 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1525 | * long bpf_get_numa_node_id(void) |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1526 | * Description |
| 1527 | * Return the id of the current NUMA node. The primary use case |
| 1528 | * for this helper is the selection of sockets for the local NUMA |
| 1529 | * node, when the program is attached to sockets using the |
| 1530 | * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), |
| 1531 | * but the helper is also available to other eBPF program types, |
| 1532 | * similarly to **bpf_get_smp_processor_id**\ (). |
| 1533 | * Return |
| 1534 | * The id of current NUMA node. |
| 1535 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1536 | * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1537 | * Description |
| 1538 | * Grows headroom of packet associated to *skb* and adjusts the |
| 1539 | * offset of the MAC header accordingly, adding *len* bytes of |
| 1540 | * space. It automatically extends and reallocates memory as |
| 1541 | * required. |
| 1542 | * |
| 1543 | * This helper can be used on a layer 3 *skb* to push a MAC header |
| 1544 | * for redirection into a layer 2 device. |
| 1545 | * |
| 1546 | * All values for *flags* are reserved for future usage, and must |
| 1547 | * be left at zero. |
| 1548 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1549 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1550 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1551 | * previously done by the verifier are invalidated and must be |
| 1552 | * performed again, if the helper is used in combination with |
| 1553 | * direct packet access. |
| 1554 | * Return |
| 1555 | * 0 on success, or a negative error in case of failure. |
| 1556 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1557 | * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1558 | * Description |
| 1559 | * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that |
| 1560 | * it is possible to use a negative value for *delta*. This helper |
| 1561 | * can be used to prepare the packet for pushing or popping |
| 1562 | * headers. |
| 1563 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1564 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1565 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1566 | * previously done by the verifier are invalidated and must be |
| 1567 | * performed again, if the helper is used in combination with |
| 1568 | * direct packet access. |
| 1569 | * Return |
| 1570 | * 0 on success, or a negative error in case of failure. |
| 1571 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1572 | * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1573 | * Description |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 1574 | * Copy a NUL terminated string from an unsafe kernel address |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1575 | * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 1576 | * more details. |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1577 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1578 | * Generally, use **bpf_probe_read_user_str**\ () or |
| 1579 | * **bpf_probe_read_kernel_str**\ () instead. |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1580 | * Return |
| 1581 | * On success, the strictly positive length of the string, |
| 1582 | * including the trailing NUL character. On error, a negative |
| 1583 | * value. |
| 1584 | * |
| 1585 | * u64 bpf_get_socket_cookie(struct sk_buff *skb) |
| 1586 | * Description |
| 1587 | * If the **struct sk_buff** pointed by *skb* has a known socket, |
| 1588 | * retrieve the cookie (generated by the kernel) of this socket. |
| 1589 | * If no cookie has been set yet, generate a new cookie. Once |
| 1590 | * generated, the socket cookie remains stable for the life of the |
| 1591 | * socket. This helper can be useful for monitoring per socket |
Daniel Borkmann | cd48bdd | 2019-08-08 13:57:25 +0200 | [diff] [blame] | 1592 | * networking traffic statistics as it provides a global socket |
| 1593 | * identifier that can be assumed unique. |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1594 | * Return |
| 1595 | * A 8-byte long non-decreasing number on success, or 0 if the |
| 1596 | * socket field is missing inside *skb*. |
| 1597 | * |
Andrey Ignatov | d692f11 | 2018-07-30 17:42:28 -0700 | [diff] [blame] | 1598 | * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) |
| 1599 | * Description |
| 1600 | * Equivalent to bpf_get_socket_cookie() helper that accepts |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 1601 | * *skb*, but gets socket from **struct bpf_sock_addr** context. |
Andrey Ignatov | d692f11 | 2018-07-30 17:42:28 -0700 | [diff] [blame] | 1602 | * Return |
| 1603 | * A 8-byte long non-decreasing number. |
| 1604 | * |
| 1605 | * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) |
| 1606 | * Description |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1607 | * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 1608 | * *skb*, but gets socket from **struct bpf_sock_ops** context. |
Andrey Ignatov | d692f11 | 2018-07-30 17:42:28 -0700 | [diff] [blame] | 1609 | * Return |
| 1610 | * A 8-byte long non-decreasing number. |
| 1611 | * |
Quentin Monnet | c6b5fb8 | 2018-04-25 18:16:57 +0100 | [diff] [blame] | 1612 | * u32 bpf_get_socket_uid(struct sk_buff *skb) |
| 1613 | * Return |
| 1614 | * The owner UID of the socket associated to *skb*. If the socket |
| 1615 | * is **NULL**, or if it is not a full socket (i.e. if it is a |
| 1616 | * time-wait or a request socket instead), **overflowuid** value |
| 1617 | * is returned (note that **overflowuid** might also be the actual |
| 1618 | * UID value for the socket). |
| 1619 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1620 | * long bpf_set_hash(struct sk_buff *skb, u32 hash) |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1621 | * Description |
| 1622 | * Set the full hash for *skb* (set the field *skb*\ **->hash**) |
| 1623 | * to value *hash*. |
| 1624 | * Return |
| 1625 | * 0 |
| 1626 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1627 | * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1628 | * Description |
| 1629 | * Emulate a call to **setsockopt()** on the socket associated to |
| 1630 | * *bpf_socket*, which must be a full socket. The *level* at |
| 1631 | * which the option resides and the name *optname* of the option |
| 1632 | * must be specified, see **setsockopt(2)** for more information. |
| 1633 | * The option value of length *optlen* is pointed by *optval*. |
| 1634 | * |
Stanislav Fomichev | beecf11 | 2020-04-30 16:31:52 -0700 | [diff] [blame] | 1635 | * *bpf_socket* should be one of the following: |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1636 | * |
Stanislav Fomichev | beecf11 | 2020-04-30 16:31:52 -0700 | [diff] [blame] | 1637 | * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. |
| 1638 | * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** |
| 1639 | * and **BPF_CGROUP_INET6_CONNECT**. |
| 1640 | * |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1641 | * This helper actually implements a subset of **setsockopt()**. |
| 1642 | * It supports the following *level*\ s: |
| 1643 | * |
| 1644 | * * **SOL_SOCKET**, which supports the following *optname*\ s: |
| 1645 | * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, |
Dmitry Yakunin | f9bcf96 | 2020-06-20 18:30:52 +0300 | [diff] [blame] | 1646 | * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, |
| 1647 | * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1648 | * * **IPPROTO_TCP**, which supports the following *optname*\ s: |
| 1649 | * **TCP_CONGESTION**, **TCP_BPF_IW**, |
Dmitry Yakunin | f9bcf96 | 2020-06-20 18:30:52 +0300 | [diff] [blame] | 1650 | * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, |
| 1651 | * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, |
| 1652 | * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1653 | * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. |
| 1654 | * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. |
| 1655 | * Return |
| 1656 | * 0 on success, or a negative error in case of failure. |
| 1657 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1658 | * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1659 | * Description |
| 1660 | * Grow or shrink the room for data in the packet associated to |
| 1661 | * *skb* by *len_diff*, and according to the selected *mode*. |
| 1662 | * |
Daniel Borkmann | 836e66c | 2020-06-02 16:58:32 +0200 | [diff] [blame] | 1663 | * By default, the helper will reset any offloaded checksum |
| 1664 | * indicator of the skb to CHECKSUM_NONE. This can be avoided |
| 1665 | * by the following flag: |
| 1666 | * |
| 1667 | * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded |
| 1668 | * checksum data of the skb to CHECKSUM_NONE. |
| 1669 | * |
Willem de Bruijn | 14aa319 | 2019-03-22 14:32:54 -0400 | [diff] [blame] | 1670 | * There are two supported modes at this time: |
| 1671 | * |
| 1672 | * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer |
| 1673 | * (room space is added or removed below the layer 2 header). |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1674 | * |
| 1675 | * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer |
| 1676 | * (room space is added or removed below the layer 3 header). |
| 1677 | * |
Willem de Bruijn | 868d523 | 2019-03-22 14:32:56 -0400 | [diff] [blame] | 1678 | * The following flags are supported at this time: |
Willem de Bruijn | 2278f6c | 2019-03-22 14:32:55 -0400 | [diff] [blame] | 1679 | * |
| 1680 | * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. |
| 1681 | * Adjusting mss in this way is not allowed for datagrams. |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1682 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 1683 | * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, |
| 1684 | * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: |
Willem de Bruijn | 868d523 | 2019-03-22 14:32:56 -0400 | [diff] [blame] | 1685 | * Any new space is reserved to hold a tunnel header. |
| 1686 | * Configure skb offsets and other fields accordingly. |
| 1687 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 1688 | * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, |
| 1689 | * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: |
Willem de Bruijn | 868d523 | 2019-03-22 14:32:56 -0400 | [diff] [blame] | 1690 | * Use with ENCAP_L3 flags to further specify the tunnel type. |
| 1691 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 1692 | * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): |
Alan Maguire | 58dfc90 | 2019-04-09 15:06:41 +0100 | [diff] [blame] | 1693 | * Use with ENCAP_L3/L4 flags to further specify the tunnel |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 1694 | * type; *len* is the length of the inner MAC header. |
Alan Maguire | 58dfc90 | 2019-04-09 15:06:41 +0100 | [diff] [blame] | 1695 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1696 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1697 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1698 | * previously done by the verifier are invalidated and must be |
| 1699 | * performed again, if the helper is used in combination with |
| 1700 | * direct packet access. |
| 1701 | * Return |
| 1702 | * 0 on success, or a negative error in case of failure. |
| 1703 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1704 | * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1705 | * Description |
| 1706 | * Redirect the packet to the endpoint referenced by *map* at |
| 1707 | * index *key*. Depending on its type, this *map* can contain |
| 1708 | * references to net devices (for forwarding packets through other |
| 1709 | * ports), or to CPUs (for redirecting XDP frames to another CPU; |
| 1710 | * but this is only implemented for native XDP (with driver |
| 1711 | * support) as of this writing). |
| 1712 | * |
Toke Høiland-Jørgensen | 43e74c0 | 2019-06-28 11:12:34 +0200 | [diff] [blame] | 1713 | * The lower two bits of *flags* are used as the return code if |
| 1714 | * the map lookup fails. This is so that the return value can be |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1715 | * one of the XDP program return codes up to **XDP_TX**, as chosen |
| 1716 | * by the caller. Any higher bits in the *flags* argument must be |
Toke Høiland-Jørgensen | 43e74c0 | 2019-06-28 11:12:34 +0200 | [diff] [blame] | 1717 | * unset. |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1718 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1719 | * See also **bpf_redirect**\ (), which only supports redirecting |
| 1720 | * to an ifindex, but doesn't require a map to do so. |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1721 | * Return |
Toke Høiland-Jørgensen | f25975f | 2020-02-18 14:03:34 +0100 | [diff] [blame] | 1722 | * **XDP_REDIRECT** on success, or the value of the two lower bits |
Jakub Wilk | a33d314 | 2020-04-22 10:23:24 +0200 | [diff] [blame] | 1723 | * of the *flags* argument on error. |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1724 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1725 | * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1726 | * Description |
| 1727 | * Redirect the packet to the socket referenced by *map* (of type |
| 1728 | * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and |
| 1729 | * egress interfaces can be used for redirection. The |
| 1730 | * **BPF_F_INGRESS** value in *flags* is used to make the |
| 1731 | * distinction (ingress path is selected if the flag is present, |
| 1732 | * egress path otherwise). This is the only flag supported for now. |
| 1733 | * Return |
| 1734 | * **SK_PASS** on success, or **SK_DROP** on error. |
| 1735 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1736 | * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1737 | * Description |
| 1738 | * Add an entry to, or update a *map* referencing sockets. The |
| 1739 | * *skops* is used as a new value for the entry associated to |
| 1740 | * *key*. *flags* is one of: |
| 1741 | * |
| 1742 | * **BPF_NOEXIST** |
| 1743 | * The entry for *key* must not exist in the map. |
| 1744 | * **BPF_EXIST** |
| 1745 | * The entry for *key* must already exist in the map. |
| 1746 | * **BPF_ANY** |
| 1747 | * No condition on the existence of the entry for *key*. |
| 1748 | * |
| 1749 | * If the *map* has eBPF programs (parser and verdict), those will |
| 1750 | * be inherited by the socket being added. If the socket is |
| 1751 | * already attached to eBPF programs, this results in an error. |
| 1752 | * Return |
| 1753 | * 0 on success, or a negative error in case of failure. |
| 1754 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1755 | * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1756 | * Description |
| 1757 | * Adjust the address pointed by *xdp_md*\ **->data_meta** by |
| 1758 | * *delta* (which can be positive or negative). Note that this |
| 1759 | * operation modifies the address stored in *xdp_md*\ **->data**, |
| 1760 | * so the latter must be loaded only after the helper has been |
| 1761 | * called. |
| 1762 | * |
| 1763 | * The use of *xdp_md*\ **->data_meta** is optional and programs |
| 1764 | * are not required to use it. The rationale is that when the |
| 1765 | * packet is processed with XDP (e.g. as DoS filter), it is |
| 1766 | * possible to push further meta data along with it before passing |
| 1767 | * to the stack, and to give the guarantee that an ingress eBPF |
| 1768 | * program attached as a TC classifier on the same device can pick |
| 1769 | * this up for further post-processing. Since TC works with socket |
| 1770 | * buffers, it remains possible to set from XDP the **mark** or |
| 1771 | * **priority** pointers, or other pointers for the socket buffer. |
| 1772 | * Having this scratch space generic and programmable allows for |
| 1773 | * more flexibility as the user is free to store whatever meta |
| 1774 | * data they need. |
| 1775 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 1776 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | fa15601 | 2018-04-25 18:16:56 +0100 | [diff] [blame] | 1777 | * packet buffer. Therefore, at load time, all checks on pointers |
| 1778 | * previously done by the verifier are invalidated and must be |
| 1779 | * performed again, if the helper is used in combination with |
| 1780 | * direct packet access. |
| 1781 | * Return |
| 1782 | * 0 on success, or a negative error in case of failure. |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1783 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1784 | * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1785 | * Description |
| 1786 | * Read the value of a perf event counter, and store it into *buf* |
| 1787 | * of size *buf_size*. This helper relies on a *map* of type |
| 1788 | * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event |
| 1789 | * counter is selected when *map* is updated with perf event file |
| 1790 | * descriptors. The *map* is an array whose size is the number of |
| 1791 | * available CPUs, and each cell contains a value relative to one |
| 1792 | * CPU. The value to retrieve is indicated by *flags*, that |
| 1793 | * contains the index of the CPU to look up, masked with |
| 1794 | * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to |
| 1795 | * **BPF_F_CURRENT_CPU** to indicate that the value for the |
| 1796 | * current CPU should be retrieved. |
| 1797 | * |
| 1798 | * This helper behaves in a way close to |
| 1799 | * **bpf_perf_event_read**\ () helper, save that instead of |
| 1800 | * just returning the value observed, it fills the *buf* |
| 1801 | * structure. This allows for additional data to be retrieved: in |
| 1802 | * particular, the enabled and running times (in *buf*\ |
| 1803 | * **->enabled** and *buf*\ **->running**, respectively) are |
| 1804 | * copied. In general, **bpf_perf_event_read_value**\ () is |
| 1805 | * recommended over **bpf_perf_event_read**\ (), which has some |
| 1806 | * ABI issues and provides fewer functionalities. |
| 1807 | * |
| 1808 | * These values are interesting, because hardware PMU (Performance |
| 1809 | * Monitoring Unit) counters are limited resources. When there are |
| 1810 | * more PMU based perf events opened than available counters, |
| 1811 | * kernel will multiplex these events so each event gets certain |
| 1812 | * percentage (but not all) of the PMU time. In case that |
| 1813 | * multiplexing happens, the number of samples or counter value |
| 1814 | * will not reflect the case compared to when no multiplexing |
| 1815 | * occurs. This makes comparison between different runs difficult. |
| 1816 | * Typically, the counter value should be normalized before |
| 1817 | * comparing to other experiments. The usual normalization is done |
| 1818 | * as follows. |
| 1819 | * |
| 1820 | * :: |
| 1821 | * |
| 1822 | * normalized_counter = counter * t_enabled / t_running |
| 1823 | * |
| 1824 | * Where t_enabled is the time enabled for event and t_running is |
| 1825 | * the time running for event since last normalization. The |
| 1826 | * enabled and running times are accumulated since the perf event |
| 1827 | * open. To achieve scaling factor between two invocations of an |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1828 | * eBPF program, users can use CPU id as the key (which is |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1829 | * typical for perf array usage model) to remember the previous |
| 1830 | * value and do the calculation inside the eBPF program. |
| 1831 | * Return |
| 1832 | * 0 on success, or a negative error in case of failure. |
| 1833 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1834 | * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1835 | * Description |
| 1836 | * For en eBPF program attached to a perf event, retrieve the |
| 1837 | * value of the event counter associated to *ctx* and store it in |
| 1838 | * the structure pointed by *buf* and of size *buf_size*. Enabled |
| 1839 | * and running times are also stored in the structure (see |
| 1840 | * description of helper **bpf_perf_event_read_value**\ () for |
| 1841 | * more details). |
| 1842 | * Return |
| 1843 | * 0 on success, or a negative error in case of failure. |
| 1844 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1845 | * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1846 | * Description |
| 1847 | * Emulate a call to **getsockopt()** on the socket associated to |
| 1848 | * *bpf_socket*, which must be a full socket. The *level* at |
| 1849 | * which the option resides and the name *optname* of the option |
| 1850 | * must be specified, see **getsockopt(2)** for more information. |
| 1851 | * The retrieved value is stored in the structure pointed by |
| 1852 | * *opval* and of length *optlen*. |
| 1853 | * |
Stanislav Fomichev | beecf11 | 2020-04-30 16:31:52 -0700 | [diff] [blame] | 1854 | * *bpf_socket* should be one of the following: |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1855 | * |
Stanislav Fomichev | beecf11 | 2020-04-30 16:31:52 -0700 | [diff] [blame] | 1856 | * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. |
| 1857 | * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** |
| 1858 | * and **BPF_CGROUP_INET6_CONNECT**. |
| 1859 | * |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1860 | * This helper actually implements a subset of **getsockopt()**. |
| 1861 | * It supports the following *level*\ s: |
| 1862 | * |
| 1863 | * * **IPPROTO_TCP**, which supports *optname* |
| 1864 | * **TCP_CONGESTION**. |
| 1865 | * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. |
| 1866 | * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. |
| 1867 | * Return |
| 1868 | * 0 on success, or a negative error in case of failure. |
| 1869 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1870 | * long bpf_override_return(struct pt_regs *regs, u64 rc) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1871 | * Description |
| 1872 | * Used for error injection, this helper uses kprobes to override |
| 1873 | * the return value of the probed function, and to set it to *rc*. |
| 1874 | * The first argument is the context *regs* on which the kprobe |
| 1875 | * works. |
| 1876 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 1877 | * This helper works by setting the PC (program counter) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1878 | * to an override function which is run in place of the original |
| 1879 | * probed function. This means the probed function is not run at |
| 1880 | * all. The replacement function just returns with the required |
| 1881 | * value. |
| 1882 | * |
| 1883 | * This helper has security implications, and thus is subject to |
| 1884 | * restrictions. It is only available if the kernel was compiled |
| 1885 | * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration |
| 1886 | * option, and in this case it only works on functions tagged with |
| 1887 | * **ALLOW_ERROR_INJECTION** in the kernel code. |
| 1888 | * |
| 1889 | * Also, the helper is only available for the architectures having |
| 1890 | * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, |
| 1891 | * x86 architecture is the only one to support this feature. |
| 1892 | * Return |
| 1893 | * 0 |
| 1894 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1895 | * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1896 | * Description |
| 1897 | * Attempt to set the value of the **bpf_sock_ops_cb_flags** field |
| 1898 | * for the full TCP socket associated to *bpf_sock_ops* to |
| 1899 | * *argval*. |
| 1900 | * |
| 1901 | * The primary use of this field is to determine if there should |
| 1902 | * be calls to eBPF programs of type |
| 1903 | * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP |
| 1904 | * code. A program of the same type can change its value, per |
| 1905 | * connection and as necessary, when the connection is |
| 1906 | * established. This field is directly accessible for reading, but |
| 1907 | * this helper must be used for updates in order to return an |
| 1908 | * error if an eBPF program tries to set a callback that is not |
| 1909 | * supported in the current kernel. |
| 1910 | * |
Viet Hoang Tran | 725721a | 2019-04-15 09:54:55 +0000 | [diff] [blame] | 1911 | * *argval* is a flag array which can combine these flags: |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1912 | * |
| 1913 | * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) |
| 1914 | * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) |
| 1915 | * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) |
Stanislav Fomichev | 23729ff | 2019-07-02 09:13:56 -0700 | [diff] [blame] | 1916 | * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1917 | * |
Viet Hoang Tran | 725721a | 2019-04-15 09:54:55 +0000 | [diff] [blame] | 1918 | * Therefore, this function can be used to clear a callback flag by |
| 1919 | * setting the appropriate bit to zero. e.g. to disable the RTO |
| 1920 | * callback: |
| 1921 | * |
| 1922 | * **bpf_sock_ops_cb_flags_set(bpf_sock,** |
| 1923 | * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** |
| 1924 | * |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 1925 | * Here are some examples of where one could call such eBPF |
| 1926 | * program: |
| 1927 | * |
| 1928 | * * When RTO fires. |
| 1929 | * * When a packet is retransmitted. |
| 1930 | * * When the connection terminates. |
| 1931 | * * When a packet is sent. |
| 1932 | * * When a packet is received. |
| 1933 | * Return |
| 1934 | * Code **-EINVAL** if the socket is not a full TCP socket; |
| 1935 | * otherwise, a positive number containing the bits that could not |
| 1936 | * be set is returned (which comes down to 0 if all bits were set |
| 1937 | * as required). |
| 1938 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1939 | * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1940 | * Description |
| 1941 | * This helper is used in programs implementing policies at the |
| 1942 | * socket level. If the message *msg* is allowed to pass (i.e. if |
| 1943 | * the verdict eBPF program returns **SK_PASS**), redirect it to |
| 1944 | * the socket referenced by *map* (of type |
| 1945 | * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and |
| 1946 | * egress interfaces can be used for redirection. The |
| 1947 | * **BPF_F_INGRESS** value in *flags* is used to make the |
| 1948 | * distinction (ingress path is selected if the flag is present, |
| 1949 | * egress path otherwise). This is the only flag supported for now. |
| 1950 | * Return |
| 1951 | * **SK_PASS** on success, or **SK_DROP** on error. |
| 1952 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1953 | * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1954 | * Description |
| 1955 | * For socket policies, apply the verdict of the eBPF program to |
| 1956 | * the next *bytes* (number of bytes) of message *msg*. |
| 1957 | * |
| 1958 | * For example, this helper can be used in the following cases: |
| 1959 | * |
| 1960 | * * A single **sendmsg**\ () or **sendfile**\ () system call |
| 1961 | * contains multiple logical messages that the eBPF program is |
| 1962 | * supposed to read and for which it should apply a verdict. |
| 1963 | * * An eBPF program only cares to read the first *bytes* of a |
| 1964 | * *msg*. If the message has a large payload, then setting up |
| 1965 | * and calling the eBPF program repeatedly for all bytes, even |
| 1966 | * though the verdict is already known, would create unnecessary |
| 1967 | * overhead. |
| 1968 | * |
| 1969 | * When called from within an eBPF program, the helper sets a |
| 1970 | * counter internal to the BPF infrastructure, that is used to |
| 1971 | * apply the last verdict to the next *bytes*. If *bytes* is |
| 1972 | * smaller than the current data being processed from a |
| 1973 | * **sendmsg**\ () or **sendfile**\ () system call, the first |
| 1974 | * *bytes* will be sent and the eBPF program will be re-run with |
| 1975 | * the pointer for start of data pointing to byte number *bytes* |
| 1976 | * **+ 1**. If *bytes* is larger than the current data being |
| 1977 | * processed, then the eBPF verdict will be applied to multiple |
| 1978 | * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are |
| 1979 | * consumed. |
| 1980 | * |
| 1981 | * Note that if a socket closes with the internal counter holding |
| 1982 | * a non-zero value, this is not a problem because data is not |
| 1983 | * being buffered for *bytes* and is sent as it is received. |
| 1984 | * Return |
| 1985 | * 0 |
| 1986 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 1987 | * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 1988 | * Description |
| 1989 | * For socket policies, prevent the execution of the verdict eBPF |
| 1990 | * program for message *msg* until *bytes* (byte number) have been |
| 1991 | * accumulated. |
| 1992 | * |
| 1993 | * This can be used when one needs a specific number of bytes |
| 1994 | * before a verdict can be assigned, even if the data spans |
| 1995 | * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme |
| 1996 | * case would be a user calling **sendmsg**\ () repeatedly with |
| 1997 | * 1-byte long message segments. Obviously, this is bad for |
| 1998 | * performance, but it is still valid. If the eBPF program needs |
| 1999 | * *bytes* bytes to validate a header, this helper can be used to |
| 2000 | * prevent the eBPF program to be called again until *bytes* have |
| 2001 | * been accumulated. |
| 2002 | * Return |
| 2003 | * 0 |
| 2004 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2005 | * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 2006 | * Description |
| 2007 | * For socket policies, pull in non-linear data from user space |
| 2008 | * for *msg* and set pointers *msg*\ **->data** and *msg*\ |
| 2009 | * **->data_end** to *start* and *end* bytes offsets into *msg*, |
| 2010 | * respectively. |
| 2011 | * |
| 2012 | * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a |
| 2013 | * *msg* it can only parse data that the (**data**, **data_end**) |
| 2014 | * pointers have already consumed. For **sendmsg**\ () hooks this |
| 2015 | * is likely the first scatterlist element. But for calls relying |
| 2016 | * on the **sendpage** handler (e.g. **sendfile**\ ()) this will |
| 2017 | * be the range (**0**, **0**) because the data is shared with |
| 2018 | * user space and by default the objective is to avoid allowing |
| 2019 | * user space to modify data while (or after) eBPF verdict is |
| 2020 | * being decided. This helper can be used to pull in data and to |
| 2021 | * set the start and end pointer to given values. Data will be |
| 2022 | * copied if necessary (i.e. if data was not linear and if start |
| 2023 | * and end pointers do not point to the same chunk). |
| 2024 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 2025 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | ab12704 | 2018-04-25 18:16:59 +0100 | [diff] [blame] | 2026 | * packet buffer. Therefore, at load time, all checks on pointers |
| 2027 | * previously done by the verifier are invalidated and must be |
| 2028 | * performed again, if the helper is used in combination with |
| 2029 | * direct packet access. |
| 2030 | * |
| 2031 | * All values for *flags* are reserved for future usage, and must |
| 2032 | * be left at zero. |
| 2033 | * Return |
| 2034 | * 0 on success, or a negative error in case of failure. |
| 2035 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2036 | * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 2037 | * Description |
| 2038 | * Bind the socket associated to *ctx* to the address pointed by |
| 2039 | * *addr*, of length *addr_len*. This allows for making outgoing |
| 2040 | * connection from the desired IP address, which can be useful for |
| 2041 | * example when all processes inside a cgroup should use one |
| 2042 | * single IP address on a host that has multiple IP configured. |
| 2043 | * |
| 2044 | * This helper works for IPv4 and IPv6, TCP and UDP sockets. The |
| 2045 | * domain (*addr*\ **->sa_family**) must be **AF_INET** (or |
Stanislav Fomichev | 8086fba | 2020-05-08 10:46:11 -0700 | [diff] [blame] | 2046 | * **AF_INET6**). It's advised to pass zero port (**sin_port** |
| 2047 | * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like |
| 2048 | * behavior and lets the kernel efficiently pick up an unused |
| 2049 | * port as long as 4-tuple is unique. Passing non-zero port might |
| 2050 | * lead to degraded performance. |
Quentin Monnet | 7aa79a8 | 2018-04-25 18:16:58 +0100 | [diff] [blame] | 2051 | * Return |
| 2052 | * 0 on success, or a negative error in case of failure. |
Quentin Monnet | 2d020dd | 2018-04-25 18:17:00 +0100 | [diff] [blame] | 2053 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2054 | * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) |
Quentin Monnet | 2d020dd | 2018-04-25 18:17:00 +0100 | [diff] [blame] | 2055 | * Description |
| 2056 | * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is |
Jesper Dangaard Brouer | c8741e2 | 2020-05-14 12:51:25 +0200 | [diff] [blame] | 2057 | * possible to both shrink and grow the packet tail. |
| 2058 | * Shrink done via *delta* being a negative integer. |
Quentin Monnet | 2d020dd | 2018-04-25 18:17:00 +0100 | [diff] [blame] | 2059 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 2060 | * A call to this helper is susceptible to change the underlying |
Quentin Monnet | 2d020dd | 2018-04-25 18:17:00 +0100 | [diff] [blame] | 2061 | * packet buffer. Therefore, at load time, all checks on pointers |
| 2062 | * previously done by the verifier are invalidated and must be |
| 2063 | * performed again, if the helper is used in combination with |
| 2064 | * direct packet access. |
| 2065 | * Return |
| 2066 | * 0 on success, or a negative error in case of failure. |
| 2067 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2068 | * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) |
Quentin Monnet | 2d020dd | 2018-04-25 18:17:00 +0100 | [diff] [blame] | 2069 | * Description |
| 2070 | * Retrieve the XFRM state (IP transform framework, see also |
| 2071 | * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. |
| 2072 | * |
| 2073 | * The retrieved value is stored in the **struct bpf_xfrm_state** |
| 2074 | * pointed by *xfrm_state* and of length *size*. |
| 2075 | * |
| 2076 | * All values for *flags* are reserved for future usage, and must |
| 2077 | * be left at zero. |
| 2078 | * |
| 2079 | * This helper is available only if the kernel was compiled with |
| 2080 | * **CONFIG_XFRM** configuration option. |
| 2081 | * Return |
| 2082 | * 0 on success, or a negative error in case of failure. |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2083 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2084 | * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2085 | * Description |
Quentin Monnet | 79552fb | 2018-04-30 11:39:04 +0100 | [diff] [blame] | 2086 | * Return a user or a kernel stack in bpf program provided buffer. |
| 2087 | * To achieve this, the helper needs *ctx*, which is a pointer |
| 2088 | * to the context on which the tracing program is executed. |
| 2089 | * To store the stacktrace, the bpf program provides *buf* with |
| 2090 | * a nonnegative *size*. |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2091 | * |
Quentin Monnet | 79552fb | 2018-04-30 11:39:04 +0100 | [diff] [blame] | 2092 | * The last argument, *flags*, holds the number of stack frames to |
| 2093 | * skip (from 0 to 255), masked with |
| 2094 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set |
| 2095 | * the following flags: |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2096 | * |
Quentin Monnet | 79552fb | 2018-04-30 11:39:04 +0100 | [diff] [blame] | 2097 | * **BPF_F_USER_STACK** |
| 2098 | * Collect a user space stack instead of a kernel stack. |
| 2099 | * **BPF_F_USER_BUILD_ID** |
| 2100 | * Collect buildid+offset instead of ips for user stack, |
| 2101 | * only valid if **BPF_F_USER_STACK** is also specified. |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2102 | * |
Quentin Monnet | 79552fb | 2018-04-30 11:39:04 +0100 | [diff] [blame] | 2103 | * **bpf_get_stack**\ () can collect up to |
| 2104 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject |
| 2105 | * to sufficient large buffer size. Note that |
| 2106 | * this limit can be controlled with the **sysctl** program, and |
| 2107 | * that it should be manually increased in order to profile long |
| 2108 | * user stacks (such as stacks for Java programs). To do so, use: |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2109 | * |
Quentin Monnet | 79552fb | 2018-04-30 11:39:04 +0100 | [diff] [blame] | 2110 | * :: |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2111 | * |
Quentin Monnet | 79552fb | 2018-04-30 11:39:04 +0100 | [diff] [blame] | 2112 | * # sysctl kernel.perf_event_max_stack=<new value> |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 2113 | * Return |
Quentin Monnet | 7a279e9 | 2018-05-29 12:27:44 +0100 | [diff] [blame] | 2114 | * A non-negative value equal to or less than *size* on success, |
| 2115 | * or a negative error in case of failure. |
Daniel Borkmann | 4e1ec56 | 2018-05-04 01:08:15 +0200 | [diff] [blame] | 2116 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2117 | * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) |
Daniel Borkmann | 4e1ec56 | 2018-05-04 01:08:15 +0200 | [diff] [blame] | 2118 | * Description |
| 2119 | * This helper is similar to **bpf_skb_load_bytes**\ () in that |
| 2120 | * it provides an easy way to load *len* bytes from *offset* |
| 2121 | * from the packet associated to *skb*, into the buffer pointed |
| 2122 | * by *to*. The difference to **bpf_skb_load_bytes**\ () is that |
| 2123 | * a fifth argument *start_header* exists in order to select a |
| 2124 | * base offset to start from. *start_header* can be one of: |
| 2125 | * |
| 2126 | * **BPF_HDR_START_MAC** |
| 2127 | * Base offset to load data from is *skb*'s mac header. |
| 2128 | * **BPF_HDR_START_NET** |
| 2129 | * Base offset to load data from is *skb*'s network header. |
| 2130 | * |
| 2131 | * In general, "direct packet access" is the preferred method to |
| 2132 | * access packet data, however, this helper is in particular useful |
| 2133 | * in socket filters where *skb*\ **->data** does not always point |
| 2134 | * to the start of the mac header and where "direct packet access" |
| 2135 | * is not available. |
Daniel Borkmann | 4e1ec56 | 2018-05-04 01:08:15 +0200 | [diff] [blame] | 2136 | * Return |
| 2137 | * 0 on success, or a negative error in case of failure. |
| 2138 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2139 | * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 2140 | * Description |
| 2141 | * Do FIB lookup in kernel tables using parameters in *params*. |
| 2142 | * If lookup is successful and result shows packet is to be |
| 2143 | * forwarded, the neighbor tables are searched for the nexthop. |
| 2144 | * If successful (ie., FIB lookup shows forwarding and nexthop |
David Ahern | fa898d7 | 2018-05-29 10:58:07 -0700 | [diff] [blame] | 2145 | * is resolved), the nexthop address is returned in ipv4_dst |
| 2146 | * or ipv6_dst based on family, smac is set to mac address of |
| 2147 | * egress device, dmac is set to nexthop mac address, rt_metric |
David Ahern | 4c79579 | 2018-06-26 16:21:18 -0700 | [diff] [blame] | 2148 | * is set to metric from route (IPv4/IPv6 only), and ifindex |
| 2149 | * is set to the device index of the nexthop from the FIB lookup. |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 2150 | * |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2151 | * *plen* argument is the size of the passed in struct. |
| 2152 | * *flags* argument can be a combination of one or more of the |
| 2153 | * following values: |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 2154 | * |
Quentin Monnet | 7a279e9 | 2018-05-29 12:27:44 +0100 | [diff] [blame] | 2155 | * **BPF_FIB_LOOKUP_DIRECT** |
| 2156 | * Do a direct table lookup vs full lookup using FIB |
| 2157 | * rules. |
| 2158 | * **BPF_FIB_LOOKUP_OUTPUT** |
| 2159 | * Perform lookup from an egress perspective (default is |
| 2160 | * ingress). |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 2161 | * |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2162 | * *ctx* is either **struct xdp_md** for XDP programs or |
| 2163 | * **struct sk_buff** tc cls_act programs. |
| 2164 | * Return |
David Ahern | 4c79579 | 2018-06-26 16:21:18 -0700 | [diff] [blame] | 2165 | * * < 0 if any input argument is invalid |
| 2166 | * * 0 on success (packet is forwarded, nexthop neighbor exists) |
| 2167 | * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the |
Quentin Monnet | 2bae79d | 2018-07-12 12:52:22 +0100 | [diff] [blame] | 2168 | * packet is not forwarded or needs assist from full stack |
John Fastabend | 8111038 | 2018-05-14 10:00:17 -0700 | [diff] [blame] | 2169 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2170 | * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) |
John Fastabend | 8111038 | 2018-05-14 10:00:17 -0700 | [diff] [blame] | 2171 | * Description |
| 2172 | * Add an entry to, or update a sockhash *map* referencing sockets. |
| 2173 | * The *skops* is used as a new value for the entry associated to |
| 2174 | * *key*. *flags* is one of: |
| 2175 | * |
| 2176 | * **BPF_NOEXIST** |
| 2177 | * The entry for *key* must not exist in the map. |
| 2178 | * **BPF_EXIST** |
| 2179 | * The entry for *key* must already exist in the map. |
| 2180 | * **BPF_ANY** |
| 2181 | * No condition on the existence of the entry for *key*. |
| 2182 | * |
| 2183 | * If the *map* has eBPF programs (parser and verdict), those will |
| 2184 | * be inherited by the socket being added. If the socket is |
| 2185 | * already attached to eBPF programs, this results in an error. |
| 2186 | * Return |
| 2187 | * 0 on success, or a negative error in case of failure. |
| 2188 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2189 | * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) |
John Fastabend | 8111038 | 2018-05-14 10:00:17 -0700 | [diff] [blame] | 2190 | * Description |
| 2191 | * This helper is used in programs implementing policies at the |
| 2192 | * socket level. If the message *msg* is allowed to pass (i.e. if |
| 2193 | * the verdict eBPF program returns **SK_PASS**), redirect it to |
| 2194 | * the socket referenced by *map* (of type |
| 2195 | * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and |
| 2196 | * egress interfaces can be used for redirection. The |
| 2197 | * **BPF_F_INGRESS** value in *flags* is used to make the |
| 2198 | * distinction (ingress path is selected if the flag is present, |
| 2199 | * egress path otherwise). This is the only flag supported for now. |
| 2200 | * Return |
| 2201 | * **SK_PASS** on success, or **SK_DROP** on error. |
| 2202 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2203 | * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) |
John Fastabend | 8111038 | 2018-05-14 10:00:17 -0700 | [diff] [blame] | 2204 | * Description |
| 2205 | * This helper is used in programs implementing policies at the |
| 2206 | * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. |
| 2207 | * if the verdeict eBPF program returns **SK_PASS**), redirect it |
| 2208 | * to the socket referenced by *map* (of type |
| 2209 | * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and |
| 2210 | * egress interfaces can be used for redirection. The |
| 2211 | * **BPF_F_INGRESS** value in *flags* is used to make the |
| 2212 | * distinction (ingress path is selected if the flag is present, |
| 2213 | * egress otherwise). This is the only flag supported for now. |
| 2214 | * Return |
| 2215 | * **SK_PASS** on success, or **SK_DROP** on error. |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2216 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2217 | * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2218 | * Description |
| 2219 | * Encapsulate the packet associated to *skb* within a Layer 3 |
| 2220 | * protocol header. This header is provided in the buffer at |
| 2221 | * address *hdr*, with *len* its size in bytes. *type* indicates |
| 2222 | * the protocol of the header and can be one of: |
| 2223 | * |
| 2224 | * **BPF_LWT_ENCAP_SEG6** |
| 2225 | * IPv6 encapsulation with Segment Routing Header |
| 2226 | * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, |
| 2227 | * the IPv6 header is computed by the kernel. |
| 2228 | * **BPF_LWT_ENCAP_SEG6_INLINE** |
| 2229 | * Only works if *skb* contains an IPv6 packet. Insert a |
| 2230 | * Segment Routing Header (**struct ipv6_sr_hdr**) inside |
| 2231 | * the IPv6 header. |
Peter Oskolkov | 3e0bd37 | 2019-02-13 11:53:35 -0800 | [diff] [blame] | 2232 | * **BPF_LWT_ENCAP_IP** |
| 2233 | * IP encapsulation (GRE/GUE/IPIP/etc). The outer header |
| 2234 | * must be IPv4 or IPv6, followed by zero or more |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2235 | * additional headers, up to **LWT_BPF_MAX_HEADROOM** |
| 2236 | * total bytes in all prepended headers. Please note that |
| 2237 | * if **skb_is_gso**\ (*skb*) is true, no more than two |
| 2238 | * headers can be prepended, and the inner header, if |
| 2239 | * present, should be either GRE or UDP/GUE. |
Peter Oskolkov | 3e0bd37 | 2019-02-13 11:53:35 -0800 | [diff] [blame] | 2240 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2241 | * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs |
| 2242 | * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can |
| 2243 | * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and |
| 2244 | * **BPF_PROG_TYPE_LWT_XMIT**. |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2245 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 2246 | * A call to this helper is susceptible to change the underlying |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2247 | * packet buffer. Therefore, at load time, all checks on pointers |
| 2248 | * previously done by the verifier are invalidated and must be |
| 2249 | * performed again, if the helper is used in combination with |
| 2250 | * direct packet access. |
| 2251 | * Return |
| 2252 | * 0 on success, or a negative error in case of failure. |
| 2253 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2254 | * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2255 | * Description |
| 2256 | * Store *len* bytes from address *from* into the packet |
| 2257 | * associated to *skb*, at *offset*. Only the flags, tag and TLVs |
| 2258 | * inside the outermost IPv6 Segment Routing Header can be |
| 2259 | * modified through this helper. |
| 2260 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 2261 | * A call to this helper is susceptible to change the underlying |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2262 | * packet buffer. Therefore, at load time, all checks on pointers |
| 2263 | * previously done by the verifier are invalidated and must be |
| 2264 | * performed again, if the helper is used in combination with |
| 2265 | * direct packet access. |
| 2266 | * Return |
| 2267 | * 0 on success, or a negative error in case of failure. |
| 2268 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2269 | * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2270 | * Description |
| 2271 | * Adjust the size allocated to TLVs in the outermost IPv6 |
| 2272 | * Segment Routing Header contained in the packet associated to |
| 2273 | * *skb*, at position *offset* by *delta* bytes. Only offsets |
| 2274 | * after the segments are accepted. *delta* can be as well |
| 2275 | * positive (growing) as negative (shrinking). |
| 2276 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 2277 | * A call to this helper is susceptible to change the underlying |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2278 | * packet buffer. Therefore, at load time, all checks on pointers |
| 2279 | * previously done by the verifier are invalidated and must be |
| 2280 | * performed again, if the helper is used in combination with |
| 2281 | * direct packet access. |
| 2282 | * Return |
| 2283 | * 0 on success, or a negative error in case of failure. |
| 2284 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2285 | * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2286 | * Description |
| 2287 | * Apply an IPv6 Segment Routing action of type *action* to the |
| 2288 | * packet associated to *skb*. Each action takes a parameter |
| 2289 | * contained at address *param*, and of length *param_len* bytes. |
| 2290 | * *action* can be one of: |
| 2291 | * |
| 2292 | * **SEG6_LOCAL_ACTION_END_X** |
| 2293 | * End.X action: Endpoint with Layer-3 cross-connect. |
| 2294 | * Type of *param*: **struct in6_addr**. |
| 2295 | * **SEG6_LOCAL_ACTION_END_T** |
| 2296 | * End.T action: Endpoint with specific IPv6 table lookup. |
| 2297 | * Type of *param*: **int**. |
| 2298 | * **SEG6_LOCAL_ACTION_END_B6** |
| 2299 | * End.B6 action: Endpoint bound to an SRv6 policy. |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2300 | * Type of *param*: **struct ipv6_sr_hdr**. |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2301 | * **SEG6_LOCAL_ACTION_END_B6_ENCAP** |
| 2302 | * End.B6.Encap action: Endpoint bound to an SRv6 |
| 2303 | * encapsulation policy. |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2304 | * Type of *param*: **struct ipv6_sr_hdr**. |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2305 | * |
Quentin Monnet | 32e7dc2 | 2019-05-10 15:51:23 +0100 | [diff] [blame] | 2306 | * A call to this helper is susceptible to change the underlying |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 2307 | * packet buffer. Therefore, at load time, all checks on pointers |
| 2308 | * previously done by the verifier are invalidated and must be |
| 2309 | * performed again, if the helper is used in combination with |
| 2310 | * direct packet access. |
| 2311 | * Return |
| 2312 | * 0 on success, or a negative error in case of failure. |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 2313 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2314 | * long bpf_rc_repeat(void *ctx) |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2315 | * Description |
| 2316 | * This helper is used in programs implementing IR decoding, to |
| 2317 | * report a successfully decoded repeat key message. This delays |
| 2318 | * the generation of a key up event for previously generated |
| 2319 | * key down event. |
| 2320 | * |
| 2321 | * Some IR protocols like NEC have a special IR message for |
| 2322 | * repeating last button, for when a button is held down. |
| 2323 | * |
| 2324 | * The *ctx* should point to the lirc sample as passed into |
| 2325 | * the program. |
| 2326 | * |
| 2327 | * This helper is only available is the kernel was compiled with |
| 2328 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2329 | * "**y**". |
| 2330 | * Return |
| 2331 | * 0 |
| 2332 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2333 | * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 2334 | * Description |
| 2335 | * This helper is used in programs implementing IR decoding, to |
| 2336 | * report a successfully decoded key press with *scancode*, |
| 2337 | * *toggle* value in the given *protocol*. The scancode will be |
| 2338 | * translated to a keycode using the rc keymap, and reported as |
| 2339 | * an input key down event. After a period a key up event is |
| 2340 | * generated. This period can be extended by calling either |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2341 | * **bpf_rc_keydown**\ () again with the same values, or calling |
| 2342 | * **bpf_rc_repeat**\ (). |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 2343 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 2344 | * Some protocols include a toggle bit, in case the button was |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 2345 | * released and pressed again between consecutive scancodes. |
| 2346 | * |
| 2347 | * The *ctx* should point to the lirc sample as passed into |
| 2348 | * the program. |
| 2349 | * |
| 2350 | * The *protocol* is the decoded protocol number (see |
| 2351 | * **enum rc_proto** for some predefined values). |
| 2352 | * |
| 2353 | * This helper is only available is the kernel was compiled with |
| 2354 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2355 | * "**y**". |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 2356 | * Return |
| 2357 | * 0 |
| 2358 | * |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2359 | * u64 bpf_skb_cgroup_id(struct sk_buff *skb) |
Daniel Borkmann | cb20b08 | 2018-06-02 23:06:36 +0200 | [diff] [blame] | 2360 | * Description |
| 2361 | * Return the cgroup v2 id of the socket associated with the *skb*. |
| 2362 | * This is roughly similar to the **bpf_get_cgroup_classid**\ () |
| 2363 | * helper for cgroup v1 by providing a tag resp. identifier that |
| 2364 | * can be matched on or used for map lookups e.g. to implement |
| 2365 | * policy. The cgroup v2 id of a given path in the hierarchy is |
| 2366 | * exposed in user space through the f_handle API in order to get |
| 2367 | * to the same 64-bit id. |
| 2368 | * |
| 2369 | * This helper can be used on TC egress path, but not on ingress, |
| 2370 | * and is available only if the kernel was compiled with the |
| 2371 | * **CONFIG_SOCK_CGROUP_DATA** configuration option. |
| 2372 | * Return |
| 2373 | * The id is returned or 0 in case the id could not be retrieved. |
Yonghong Song | bf6fa2c | 2018-06-03 15:59:41 -0700 | [diff] [blame] | 2374 | * |
| 2375 | * u64 bpf_get_current_cgroup_id(void) |
| 2376 | * Return |
| 2377 | * A 64-bit integer containing the current cgroup id based |
| 2378 | * on the cgroup within which the current task is running. |
Roman Gushchin | cd33943 | 2018-08-02 14:27:24 -0700 | [diff] [blame] | 2379 | * |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2380 | * void *bpf_get_local_storage(void *map, u64 flags) |
Roman Gushchin | cd33943 | 2018-08-02 14:27:24 -0700 | [diff] [blame] | 2381 | * Description |
| 2382 | * Get the pointer to the local storage area. |
| 2383 | * The type and the size of the local storage is defined |
| 2384 | * by the *map* argument. |
| 2385 | * The *flags* meaning is specific for each map type, |
| 2386 | * and has to be 0 for cgroup local storage. |
| 2387 | * |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2388 | * Depending on the BPF program type, a local storage area |
| 2389 | * can be shared between multiple instances of the BPF program, |
Roman Gushchin | cd33943 | 2018-08-02 14:27:24 -0700 | [diff] [blame] | 2390 | * running simultaneously. |
| 2391 | * |
| 2392 | * A user should care about the synchronization by himself. |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2393 | * For example, by using the **BPF_STX_XADD** instruction to alter |
Roman Gushchin | cd33943 | 2018-08-02 14:27:24 -0700 | [diff] [blame] | 2394 | * the shared data. |
| 2395 | * Return |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2396 | * A pointer to the local storage area. |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 2397 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2398 | * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 2399 | * Description |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2400 | * Select a **SO_REUSEPORT** socket from a |
| 2401 | * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. |
| 2402 | * It checks the selected socket is matching the incoming |
| 2403 | * request in the socket buffer. |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 2404 | * Return |
| 2405 | * 0 on success, or a negative error in case of failure. |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2406 | * |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2407 | * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) |
| 2408 | * Description |
| 2409 | * Return id of cgroup v2 that is ancestor of cgroup associated |
| 2410 | * with the *skb* at the *ancestor_level*. The root cgroup is at |
| 2411 | * *ancestor_level* zero and each step down the hierarchy |
| 2412 | * increments the level. If *ancestor_level* == level of cgroup |
| 2413 | * associated with *skb*, then return value will be same as that |
| 2414 | * of **bpf_skb_cgroup_id**\ (). |
| 2415 | * |
| 2416 | * The helper is useful to implement policies based on cgroups |
| 2417 | * that are upper in hierarchy than immediate cgroup associated |
| 2418 | * with *skb*. |
| 2419 | * |
| 2420 | * The format of returned id and helper limitations are same as in |
| 2421 | * **bpf_skb_cgroup_id**\ (). |
| 2422 | * Return |
| 2423 | * The id is returned or 0 in case the id could not be retrieved. |
| 2424 | * |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 2425 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2426 | * Description |
| 2427 | * Look for TCP socket matching *tuple*, optionally in a child |
| 2428 | * network namespace *netns*. The return value must be checked, |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2429 | * and if non-**NULL**, released via **bpf_sk_release**\ (). |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2430 | * |
| 2431 | * The *ctx* should point to the context of the program, such as |
| 2432 | * the skb or socket (depending on the hook in use). This is used |
| 2433 | * to determine the base network namespace for the lookup. |
| 2434 | * |
| 2435 | * *tuple_size* must be one of: |
| 2436 | * |
| 2437 | * **sizeof**\ (*tuple*\ **->ipv4**) |
| 2438 | * Look for an IPv4 socket. |
| 2439 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2440 | * Look for an IPv6 socket. |
| 2441 | * |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 2442 | * If the *netns* is a negative signed 32-bit integer, then the |
Randy Dunlap | bfdfa51 | 2020-07-15 18:29:11 -0700 | [diff] [blame] | 2443 | * socket lookup table in the netns associated with the *ctx* |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 2444 | * will be used. For the TC hooks, this is the netns of the device |
| 2445 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2446 | * If *netns* is any other signed 32-bit value greater than or |
| 2447 | * equal to zero then it specifies the ID of the netns relative to |
| 2448 | * the netns associated with the *ctx*. *netns* values beyond the |
| 2449 | * range of 32-bit integers are reserved for future use. |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2450 | * |
| 2451 | * All values for *flags* are reserved for future usage, and must |
| 2452 | * be left at zero. |
| 2453 | * |
| 2454 | * This helper is available only if the kernel was compiled with |
| 2455 | * **CONFIG_NET** configuration option. |
| 2456 | * Return |
Daniel Borkmann | 0bd7211 | 2018-12-11 10:26:33 +0100 | [diff] [blame] | 2457 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. |
| 2458 | * For sockets with reuseport option, the **struct bpf_sock** |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2459 | * result is from *reuse*\ **->socks**\ [] using the hash of the |
| 2460 | * tuple. |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2461 | * |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 2462 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2463 | * Description |
| 2464 | * Look for UDP socket matching *tuple*, optionally in a child |
| 2465 | * network namespace *netns*. The return value must be checked, |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2466 | * and if non-**NULL**, released via **bpf_sk_release**\ (). |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2467 | * |
| 2468 | * The *ctx* should point to the context of the program, such as |
| 2469 | * the skb or socket (depending on the hook in use). This is used |
| 2470 | * to determine the base network namespace for the lookup. |
| 2471 | * |
| 2472 | * *tuple_size* must be one of: |
| 2473 | * |
| 2474 | * **sizeof**\ (*tuple*\ **->ipv4**) |
| 2475 | * Look for an IPv4 socket. |
| 2476 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2477 | * Look for an IPv6 socket. |
| 2478 | * |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 2479 | * If the *netns* is a negative signed 32-bit integer, then the |
Randy Dunlap | bfdfa51 | 2020-07-15 18:29:11 -0700 | [diff] [blame] | 2480 | * socket lookup table in the netns associated with the *ctx* |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 2481 | * will be used. For the TC hooks, this is the netns of the device |
| 2482 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2483 | * If *netns* is any other signed 32-bit value greater than or |
| 2484 | * equal to zero then it specifies the ID of the netns relative to |
| 2485 | * the netns associated with the *ctx*. *netns* values beyond the |
| 2486 | * range of 32-bit integers are reserved for future use. |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2487 | * |
| 2488 | * All values for *flags* are reserved for future usage, and must |
| 2489 | * be left at zero. |
| 2490 | * |
| 2491 | * This helper is available only if the kernel was compiled with |
| 2492 | * **CONFIG_NET** configuration option. |
| 2493 | * Return |
Daniel Borkmann | 0bd7211 | 2018-12-11 10:26:33 +0100 | [diff] [blame] | 2494 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. |
| 2495 | * For sockets with reuseport option, the **struct bpf_sock** |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2496 | * result is from *reuse*\ **->socks**\ [] using the hash of the |
| 2497 | * tuple. |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2498 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2499 | * long bpf_sk_release(struct bpf_sock *sock) |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2500 | * Description |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2501 | * Release the reference held by *sock*. *sock* must be a |
| 2502 | * non-**NULL** pointer that was returned from |
| 2503 | * **bpf_sk_lookup_xxx**\ (). |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 2504 | * Return |
| 2505 | * 0 on success, or a negative error in case of failure. |
John Fastabend | 6fff607 | 2018-10-19 19:56:49 -0700 | [diff] [blame] | 2506 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2507 | * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2508 | * Description |
| 2509 | * Push an element *value* in *map*. *flags* is one of: |
| 2510 | * |
| 2511 | * **BPF_EXIST** |
| 2512 | * If the queue/stack is full, the oldest element is |
| 2513 | * removed to make room for this. |
| 2514 | * Return |
| 2515 | * 0 on success, or a negative error in case of failure. |
| 2516 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2517 | * long bpf_map_pop_elem(struct bpf_map *map, void *value) |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2518 | * Description |
| 2519 | * Pop an element from *map*. |
| 2520 | * Return |
| 2521 | * 0 on success, or a negative error in case of failure. |
| 2522 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2523 | * long bpf_map_peek_elem(struct bpf_map *map, void *value) |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2524 | * Description |
| 2525 | * Get an element from *map* without removing it. |
| 2526 | * Return |
| 2527 | * 0 on success, or a negative error in case of failure. |
| 2528 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2529 | * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) |
John Fastabend | 6fff607 | 2018-10-19 19:56:49 -0700 | [diff] [blame] | 2530 | * Description |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2531 | * For socket policies, insert *len* bytes into *msg* at offset |
John Fastabend | 6fff607 | 2018-10-19 19:56:49 -0700 | [diff] [blame] | 2532 | * *start*. |
| 2533 | * |
| 2534 | * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2535 | * *msg* it may want to insert metadata or options into the *msg*. |
John Fastabend | 6fff607 | 2018-10-19 19:56:49 -0700 | [diff] [blame] | 2536 | * This can later be read and used by any of the lower layer BPF |
| 2537 | * hooks. |
| 2538 | * |
| 2539 | * This helper may fail if under memory pressure (a malloc |
| 2540 | * fails) in these cases BPF programs will get an appropriate |
| 2541 | * error and BPF programs will need to handle them. |
John Fastabend | 6fff607 | 2018-10-19 19:56:49 -0700 | [diff] [blame] | 2542 | * Return |
| 2543 | * 0 on success, or a negative error in case of failure. |
John Fastabend | 7246d8e | 2018-11-26 14:16:17 -0800 | [diff] [blame] | 2544 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2545 | * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2546 | * Description |
Andrii Nakryiko | 5f0e541 | 2019-10-06 20:07:36 -0700 | [diff] [blame] | 2547 | * Will remove *len* bytes from a *msg* starting at byte *start*. |
John Fastabend | 7246d8e | 2018-11-26 14:16:17 -0800 | [diff] [blame] | 2548 | * This may result in **ENOMEM** errors under certain situations if |
| 2549 | * an allocation and copy are required due to a full ring buffer. |
| 2550 | * However, the helper will try to avoid doing the allocation |
| 2551 | * if possible. Other errors can occur if input parameters are |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2552 | * invalid either due to *start* byte not being valid part of *msg* |
John Fastabend | 7246d8e | 2018-11-26 14:16:17 -0800 | [diff] [blame] | 2553 | * payload and/or *pop* value being to large. |
John Fastabend | 7246d8e | 2018-11-26 14:16:17 -0800 | [diff] [blame] | 2554 | * Return |
Quentin Monnet | 90b1023 | 2018-12-03 12:13:35 +0000 | [diff] [blame] | 2555 | * 0 on success, or a negative error in case of failure. |
Sean Young | 01d3240 | 2018-12-06 13:01:03 +0000 | [diff] [blame] | 2556 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2557 | * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) |
Sean Young | 01d3240 | 2018-12-06 13:01:03 +0000 | [diff] [blame] | 2558 | * Description |
| 2559 | * This helper is used in programs implementing IR decoding, to |
| 2560 | * report a successfully decoded pointer movement. |
| 2561 | * |
| 2562 | * The *ctx* should point to the lirc sample as passed into |
| 2563 | * the program. |
| 2564 | * |
| 2565 | * This helper is only available is the kernel was compiled with |
| 2566 | * the **CONFIG_BPF_LIRC_MODE2** configuration option set to |
| 2567 | * "**y**". |
| 2568 | * Return |
| 2569 | * 0 |
Martin KaFai Lau | 46f8bc9 | 2019-02-09 23:22:20 -0800 | [diff] [blame] | 2570 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2571 | * long bpf_spin_lock(struct bpf_spin_lock *lock) |
Quentin Monnet | 0eb0978 | 2019-03-14 12:38:40 +0000 | [diff] [blame] | 2572 | * Description |
| 2573 | * Acquire a spinlock represented by the pointer *lock*, which is |
| 2574 | * stored as part of a value of a map. Taking the lock allows to |
| 2575 | * safely update the rest of the fields in that value. The |
| 2576 | * spinlock can (and must) later be released with a call to |
| 2577 | * **bpf_spin_unlock**\ (\ *lock*\ ). |
| 2578 | * |
| 2579 | * Spinlocks in BPF programs come with a number of restrictions |
| 2580 | * and constraints: |
| 2581 | * |
| 2582 | * * **bpf_spin_lock** objects are only allowed inside maps of |
| 2583 | * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this |
| 2584 | * list could be extended in the future). |
| 2585 | * * BTF description of the map is mandatory. |
| 2586 | * * The BPF program can take ONE lock at a time, since taking two |
| 2587 | * or more could cause dead locks. |
| 2588 | * * Only one **struct bpf_spin_lock** is allowed per map element. |
| 2589 | * * When the lock is taken, calls (either BPF to BPF or helpers) |
| 2590 | * are not allowed. |
| 2591 | * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not |
| 2592 | * allowed inside a spinlock-ed region. |
| 2593 | * * The BPF program MUST call **bpf_spin_unlock**\ () to release |
| 2594 | * the lock, on all execution paths, before it returns. |
| 2595 | * * The BPF program can access **struct bpf_spin_lock** only via |
| 2596 | * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () |
| 2597 | * helpers. Loading or storing data into the **struct |
| 2598 | * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. |
| 2599 | * * To use the **bpf_spin_lock**\ () helper, the BTF description |
| 2600 | * of the map value must be a struct and have **struct |
| 2601 | * bpf_spin_lock** *anyname*\ **;** field at the top level. |
| 2602 | * Nested lock inside another struct is not allowed. |
| 2603 | * * The **struct bpf_spin_lock** *lock* field in a map value must |
| 2604 | * be aligned on a multiple of 4 bytes in that value. |
| 2605 | * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy |
| 2606 | * the **bpf_spin_lock** field to user space. |
| 2607 | * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from |
| 2608 | * a BPF program, do not update the **bpf_spin_lock** field. |
| 2609 | * * **bpf_spin_lock** cannot be on the stack or inside a |
| 2610 | * networking packet (it can only be inside of a map values). |
| 2611 | * * **bpf_spin_lock** is available to root only. |
| 2612 | * * Tracing programs and socket filter programs cannot use |
| 2613 | * **bpf_spin_lock**\ () due to insufficient preemption checks |
| 2614 | * (but this may change in the future). |
| 2615 | * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. |
| 2616 | * Return |
| 2617 | * 0 |
| 2618 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2619 | * long bpf_spin_unlock(struct bpf_spin_lock *lock) |
Quentin Monnet | 0eb0978 | 2019-03-14 12:38:40 +0000 | [diff] [blame] | 2620 | * Description |
| 2621 | * Release the *lock* previously locked by a call to |
| 2622 | * **bpf_spin_lock**\ (\ *lock*\ ). |
| 2623 | * Return |
| 2624 | * 0 |
| 2625 | * |
Martin KaFai Lau | 46f8bc9 | 2019-02-09 23:22:20 -0800 | [diff] [blame] | 2626 | * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) |
| 2627 | * Description |
| 2628 | * This helper gets a **struct bpf_sock** pointer such |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2629 | * that all the fields in this **bpf_sock** can be accessed. |
Martin KaFai Lau | 46f8bc9 | 2019-02-09 23:22:20 -0800 | [diff] [blame] | 2630 | * Return |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2631 | * A **struct bpf_sock** pointer on success, or **NULL** in |
Martin KaFai Lau | 46f8bc9 | 2019-02-09 23:22:20 -0800 | [diff] [blame] | 2632 | * case of failure. |
Martin KaFai Lau | 655a51e | 2019-02-09 23:22:24 -0800 | [diff] [blame] | 2633 | * |
| 2634 | * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) |
| 2635 | * Description |
| 2636 | * This helper gets a **struct bpf_tcp_sock** pointer from a |
| 2637 | * **struct bpf_sock** pointer. |
Martin KaFai Lau | 655a51e | 2019-02-09 23:22:24 -0800 | [diff] [blame] | 2638 | * Return |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2639 | * A **struct bpf_tcp_sock** pointer on success, or **NULL** in |
Martin KaFai Lau | 655a51e | 2019-02-09 23:22:24 -0800 | [diff] [blame] | 2640 | * case of failure. |
brakmo | f7c917b | 2019-03-01 12:38:46 -0800 | [diff] [blame] | 2641 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2642 | * long bpf_skb_ecn_set_ce(struct sk_buff *skb) |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2643 | * Description |
| 2644 | * Set ECN (Explicit Congestion Notification) field of IP header |
| 2645 | * to **CE** (Congestion Encountered) if current value is **ECT** |
| 2646 | * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 |
| 2647 | * and IPv4. |
| 2648 | * Return |
| 2649 | * 1 if the **CE** flag is set (either by the current helper call |
| 2650 | * or because it was already present), 0 if it is not set. |
Martin KaFai Lau | dbafd7d | 2019-03-12 10:23:04 -0700 | [diff] [blame] | 2651 | * |
| 2652 | * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) |
| 2653 | * Description |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2654 | * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. |
| 2655 | * **bpf_sk_release**\ () is unnecessary and not allowed. |
Martin KaFai Lau | dbafd7d | 2019-03-12 10:23:04 -0700 | [diff] [blame] | 2656 | * Return |
Quentin Monnet | 62369db | 2019-03-14 12:38:39 +0000 | [diff] [blame] | 2657 | * A **struct bpf_sock** pointer on success, or **NULL** in |
Martin KaFai Lau | dbafd7d | 2019-03-12 10:23:04 -0700 | [diff] [blame] | 2658 | * case of failure. |
Lorenz Bauer | edbf8c0 | 2019-03-22 09:54:01 +0800 | [diff] [blame] | 2659 | * |
| 2660 | * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2661 | * Description |
| 2662 | * Look for TCP socket matching *tuple*, optionally in a child |
| 2663 | * network namespace *netns*. The return value must be checked, |
| 2664 | * and if non-**NULL**, released via **bpf_sk_release**\ (). |
| 2665 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2666 | * This function is identical to **bpf_sk_lookup_tcp**\ (), except |
| 2667 | * that it also returns timewait or request sockets. Use |
| 2668 | * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the |
| 2669 | * full structure. |
Lorenz Bauer | edbf8c0 | 2019-03-22 09:54:01 +0800 | [diff] [blame] | 2670 | * |
| 2671 | * This helper is available only if the kernel was compiled with |
| 2672 | * **CONFIG_NET** configuration option. |
| 2673 | * Return |
| 2674 | * Pointer to **struct bpf_sock**, or **NULL** in case of failure. |
| 2675 | * For sockets with reuseport option, the **struct bpf_sock** |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2676 | * result is from *reuse*\ **->socks**\ [] using the hash of the |
| 2677 | * tuple. |
Lorenz Bauer | 3990408 | 2019-03-22 09:54:02 +0800 | [diff] [blame] | 2678 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2679 | * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) |
Lorenz Bauer | 3990408 | 2019-03-22 09:54:02 +0800 | [diff] [blame] | 2680 | * Description |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2681 | * Check whether *iph* and *th* contain a valid SYN cookie ACK for |
| 2682 | * the listening socket in *sk*. |
Lorenz Bauer | 3990408 | 2019-03-22 09:54:02 +0800 | [diff] [blame] | 2683 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2684 | * *iph* points to the start of the IPv4 or IPv6 header, while |
| 2685 | * *iph_len* contains **sizeof**\ (**struct iphdr**) or |
| 2686 | * **sizeof**\ (**struct ip6hdr**). |
Lorenz Bauer | 3990408 | 2019-03-22 09:54:02 +0800 | [diff] [blame] | 2687 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2688 | * *th* points to the start of the TCP header, while *th_len* |
| 2689 | * contains **sizeof**\ (**struct tcphdr**). |
Lorenz Bauer | 3990408 | 2019-03-22 09:54:02 +0800 | [diff] [blame] | 2690 | * Return |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2691 | * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative |
| 2692 | * error otherwise. |
Andrey Ignatov | 808649f | 2019-02-27 13:28:48 -0800 | [diff] [blame] | 2693 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2694 | * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) |
Andrey Ignatov | 808649f | 2019-02-27 13:28:48 -0800 | [diff] [blame] | 2695 | * Description |
| 2696 | * Get name of sysctl in /proc/sys/ and copy it into provided by |
| 2697 | * program buffer *buf* of size *buf_len*. |
| 2698 | * |
| 2699 | * The buffer is always NUL terminated, unless it's zero-sized. |
| 2700 | * |
| 2701 | * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is |
| 2702 | * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name |
| 2703 | * only (e.g. "tcp_mem"). |
| 2704 | * Return |
| 2705 | * Number of character copied (not including the trailing NUL). |
| 2706 | * |
| 2707 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain |
| 2708 | * truncated name in this case). |
Andrey Ignatov | 1d11b30 | 2019-02-28 19:22:15 -0800 | [diff] [blame] | 2709 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2710 | * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) |
Andrey Ignatov | 1d11b30 | 2019-02-28 19:22:15 -0800 | [diff] [blame] | 2711 | * Description |
| 2712 | * Get current value of sysctl as it is presented in /proc/sys |
| 2713 | * (incl. newline, etc), and copy it as a string into provided |
| 2714 | * by program buffer *buf* of size *buf_len*. |
| 2715 | * |
| 2716 | * The whole value is copied, no matter what file position user |
| 2717 | * space issued e.g. sys_read at. |
| 2718 | * |
| 2719 | * The buffer is always NUL terminated, unless it's zero-sized. |
| 2720 | * Return |
| 2721 | * Number of character copied (not including the trailing NUL). |
| 2722 | * |
| 2723 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain |
| 2724 | * truncated name in this case). |
| 2725 | * |
| 2726 | * **-EINVAL** if current value was unavailable, e.g. because |
| 2727 | * sysctl is uninitialized and read returns -EIO for it. |
Andrey Ignatov | 4e63acd | 2019-03-07 18:38:43 -0800 | [diff] [blame] | 2728 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2729 | * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) |
Andrey Ignatov | 4e63acd | 2019-03-07 18:38:43 -0800 | [diff] [blame] | 2730 | * Description |
| 2731 | * Get new value being written by user space to sysctl (before |
| 2732 | * the actual write happens) and copy it as a string into |
| 2733 | * provided by program buffer *buf* of size *buf_len*. |
| 2734 | * |
| 2735 | * User space may write new value at file position > 0. |
| 2736 | * |
| 2737 | * The buffer is always NUL terminated, unless it's zero-sized. |
| 2738 | * Return |
| 2739 | * Number of character copied (not including the trailing NUL). |
| 2740 | * |
| 2741 | * **-E2BIG** if the buffer wasn't big enough (*buf* will contain |
| 2742 | * truncated name in this case). |
| 2743 | * |
| 2744 | * **-EINVAL** if sysctl is being read. |
| 2745 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2746 | * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) |
Andrey Ignatov | 4e63acd | 2019-03-07 18:38:43 -0800 | [diff] [blame] | 2747 | * Description |
| 2748 | * Override new value being written by user space to sysctl with |
| 2749 | * value provided by program in buffer *buf* of size *buf_len*. |
| 2750 | * |
| 2751 | * *buf* should contain a string in same form as provided by user |
| 2752 | * space on sysctl write. |
| 2753 | * |
| 2754 | * User space may write new value at file position > 0. To override |
| 2755 | * the whole sysctl value file position should be set to zero. |
| 2756 | * Return |
| 2757 | * 0 on success. |
| 2758 | * |
| 2759 | * **-E2BIG** if the *buf_len* is too big. |
| 2760 | * |
| 2761 | * **-EINVAL** if sysctl is being read. |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2762 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2763 | * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2764 | * Description |
| 2765 | * Convert the initial part of the string from buffer *buf* of |
| 2766 | * size *buf_len* to a long integer according to the given base |
| 2767 | * and save the result in *res*. |
| 2768 | * |
| 2769 | * The string may begin with an arbitrary amount of white space |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2770 | * (as determined by **isspace**\ (3)) followed by a single |
| 2771 | * optional '**-**' sign. |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2772 | * |
| 2773 | * Five least significant bits of *flags* encode base, other bits |
| 2774 | * are currently unused. |
| 2775 | * |
| 2776 | * Base must be either 8, 10, 16 or 0 to detect it automatically |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2777 | * similar to user space **strtol**\ (3). |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2778 | * Return |
| 2779 | * Number of characters consumed on success. Must be positive but |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2780 | * no more than *buf_len*. |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2781 | * |
| 2782 | * **-EINVAL** if no valid digits were found or unsupported base |
| 2783 | * was provided. |
| 2784 | * |
| 2785 | * **-ERANGE** if resulting value was out of range. |
| 2786 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2787 | * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2788 | * Description |
| 2789 | * Convert the initial part of the string from buffer *buf* of |
| 2790 | * size *buf_len* to an unsigned long integer according to the |
| 2791 | * given base and save the result in *res*. |
| 2792 | * |
| 2793 | * The string may begin with an arbitrary amount of white space |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2794 | * (as determined by **isspace**\ (3)). |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2795 | * |
| 2796 | * Five least significant bits of *flags* encode base, other bits |
| 2797 | * are currently unused. |
| 2798 | * |
| 2799 | * Base must be either 8, 10, 16 or 0 to detect it automatically |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2800 | * similar to user space **strtoul**\ (3). |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2801 | * Return |
| 2802 | * Number of characters consumed on success. Must be positive but |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2803 | * no more than *buf_len*. |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 2804 | * |
| 2805 | * **-EINVAL** if no valid digits were found or unsupported base |
| 2806 | * was provided. |
| 2807 | * |
| 2808 | * **-ERANGE** if resulting value was out of range. |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2809 | * |
| 2810 | * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) |
| 2811 | * Description |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2812 | * Get a bpf-local-storage from a *sk*. |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2813 | * |
| 2814 | * Logically, it could be thought of getting the value from |
| 2815 | * a *map* with *sk* as the **key**. From this |
| 2816 | * perspective, the usage is not much different from |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2817 | * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this |
| 2818 | * helper enforces the key must be a full socket and the map must |
| 2819 | * be a **BPF_MAP_TYPE_SK_STORAGE** also. |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2820 | * |
| 2821 | * Underneath, the value is stored locally at *sk* instead of |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2822 | * the *map*. The *map* is used as the bpf-local-storage |
| 2823 | * "type". The bpf-local-storage "type" (i.e. the *map*) is |
| 2824 | * searched against all bpf-local-storages residing at *sk*. |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2825 | * |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2826 | * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2827 | * used such that a new bpf-local-storage will be |
| 2828 | * created if one does not exist. *value* can be used |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2829 | * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2830 | * the initial value of a bpf-local-storage. If *value* is |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2831 | * **NULL**, the new bpf-local-storage will be zero initialized. |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2832 | * Return |
| 2833 | * A bpf-local-storage pointer is returned on success. |
| 2834 | * |
| 2835 | * **NULL** if not found or there was an error in adding |
| 2836 | * a new bpf-local-storage. |
| 2837 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2838 | * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2839 | * Description |
Quentin Monnet | 80867c5 | 2019-05-10 15:51:24 +0100 | [diff] [blame] | 2840 | * Delete a bpf-local-storage from a *sk*. |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 2841 | * Return |
| 2842 | * 0 on success. |
| 2843 | * |
| 2844 | * **-ENOENT** if the bpf-local-storage cannot be found. |
Yonghong Song | 8b401f9 | 2019-05-23 14:47:45 -0700 | [diff] [blame] | 2845 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2846 | * long bpf_send_signal(u32 sig) |
Yonghong Song | 8b401f9 | 2019-05-23 14:47:45 -0700 | [diff] [blame] | 2847 | * Description |
Yonghong Song | 8482941 | 2020-01-14 19:50:02 -0800 | [diff] [blame] | 2848 | * Send signal *sig* to the process of the current task. |
| 2849 | * The signal may be delivered to any of this process's threads. |
Yonghong Song | 8b401f9 | 2019-05-23 14:47:45 -0700 | [diff] [blame] | 2850 | * Return |
| 2851 | * 0 on success or successfully queued. |
| 2852 | * |
| 2853 | * **-EBUSY** if work queue under nmi is full. |
| 2854 | * |
| 2855 | * **-EINVAL** if *sig* is invalid. |
| 2856 | * |
| 2857 | * **-EPERM** if no permission to send the *sig*. |
| 2858 | * |
| 2859 | * **-EAGAIN** if bpf program can try again. |
Petar Penkov | 70d6624 | 2019-07-29 09:59:15 -0700 | [diff] [blame] | 2860 | * |
| 2861 | * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) |
| 2862 | * Description |
| 2863 | * Try to issue a SYN cookie for the packet with corresponding |
| 2864 | * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. |
| 2865 | * |
| 2866 | * *iph* points to the start of the IPv4 or IPv6 header, while |
| 2867 | * *iph_len* contains **sizeof**\ (**struct iphdr**) or |
| 2868 | * **sizeof**\ (**struct ip6hdr**). |
| 2869 | * |
| 2870 | * *th* points to the start of the TCP header, while *th_len* |
| 2871 | * contains the length of the TCP header. |
Petar Penkov | 70d6624 | 2019-07-29 09:59:15 -0700 | [diff] [blame] | 2872 | * Return |
| 2873 | * On success, lower 32 bits hold the generated SYN cookie in |
| 2874 | * followed by 16 bits which hold the MSS value for that cookie, |
| 2875 | * and the top 16 bits are unused. |
| 2876 | * |
| 2877 | * On failure, the returned value is one of the following: |
| 2878 | * |
| 2879 | * **-EINVAL** SYN cookie cannot be issued due to error |
| 2880 | * |
| 2881 | * **-ENOENT** SYN cookie should not be issued (no SYN flood) |
| 2882 | * |
| 2883 | * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies |
| 2884 | * |
| 2885 | * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 |
Alexei Starovoitov | a7658e1 | 2019-10-15 20:25:04 -0700 | [diff] [blame] | 2886 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2887 | * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) |
Alexei Starovoitov | a7658e1 | 2019-10-15 20:25:04 -0700 | [diff] [blame] | 2888 | * Description |
| 2889 | * Write raw *data* blob into a special BPF perf event held by |
| 2890 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf |
| 2891 | * event must have the following attributes: **PERF_SAMPLE_RAW** |
| 2892 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and |
| 2893 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. |
| 2894 | * |
| 2895 | * The *flags* are used to indicate the index in *map* for which |
| 2896 | * the value must be put, masked with **BPF_F_INDEX_MASK**. |
| 2897 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** |
| 2898 | * to indicate that the index of the current CPU core should be |
| 2899 | * used. |
| 2900 | * |
| 2901 | * The value to write, of *size*, is passed through eBPF stack and |
| 2902 | * pointed by *data*. |
| 2903 | * |
| 2904 | * *ctx* is a pointer to in-kernel struct sk_buff. |
| 2905 | * |
| 2906 | * This helper is similar to **bpf_perf_event_output**\ () but |
| 2907 | * restricted to raw_tracepoint bpf programs. |
| 2908 | * Return |
| 2909 | * 0 on success, or a negative error in case of failure. |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2910 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2911 | * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2912 | * Description |
| 2913 | * Safely attempt to read *size* bytes from user space address |
| 2914 | * *unsafe_ptr* and store the data in *dst*. |
| 2915 | * Return |
| 2916 | * 0 on success, or a negative error in case of failure. |
| 2917 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2918 | * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2919 | * Description |
| 2920 | * Safely attempt to read *size* bytes from kernel space address |
| 2921 | * *unsafe_ptr* and store the data in *dst*. |
| 2922 | * Return |
| 2923 | * 0 on success, or a negative error in case of failure. |
| 2924 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2925 | * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2926 | * Description |
| 2927 | * Copy a NUL terminated string from an unsafe user address |
| 2928 | * *unsafe_ptr* to *dst*. The *size* should include the |
| 2929 | * terminating NUL byte. In case the string length is smaller than |
| 2930 | * *size*, the target is not padded with further NUL bytes. If the |
| 2931 | * string length is larger than *size*, just *size*-1 bytes are |
| 2932 | * copied and the last byte is set to NUL. |
| 2933 | * |
| 2934 | * On success, the length of the copied string is returned. This |
| 2935 | * makes this helper useful in tracing programs for reading |
| 2936 | * strings, and more importantly to get its length at runtime. See |
| 2937 | * the following snippet: |
| 2938 | * |
| 2939 | * :: |
| 2940 | * |
| 2941 | * SEC("kprobe/sys_open") |
| 2942 | * void bpf_sys_open(struct pt_regs *ctx) |
| 2943 | * { |
| 2944 | * char buf[PATHLEN]; // PATHLEN is defined to 256 |
| 2945 | * int res = bpf_probe_read_user_str(buf, sizeof(buf), |
| 2946 | * ctx->di); |
| 2947 | * |
| 2948 | * // Consume buf, for example push it to |
| 2949 | * // userspace via bpf_perf_event_output(); we |
| 2950 | * // can use res (the string length) as event |
| 2951 | * // size, after checking its boundaries. |
| 2952 | * } |
| 2953 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 2954 | * In comparison, using **bpf_probe_read_user**\ () helper here |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2955 | * instead to read the string would require to estimate the length |
| 2956 | * at compile time, and would often result in copying more memory |
| 2957 | * than necessary. |
| 2958 | * |
| 2959 | * Another useful use case is when parsing individual process |
| 2960 | * arguments or individual environment variables navigating |
| 2961 | * *current*\ **->mm->arg_start** and *current*\ |
| 2962 | * **->mm->env_start**: using this helper and the return value, |
| 2963 | * one can quickly iterate at the right offset of the memory area. |
| 2964 | * Return |
| 2965 | * On success, the strictly positive length of the string, |
| 2966 | * including the trailing NUL character. On error, a negative |
| 2967 | * value. |
| 2968 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2969 | * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2970 | * Description |
| 2971 | * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 2972 | * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2973 | * Return |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 2974 | * On success, the strictly positive length of the string, including |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 2975 | * the trailing NUL character. On error, a negative value. |
Martin KaFai Lau | 206057f | 2020-01-08 16:45:51 -0800 | [diff] [blame] | 2976 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2977 | * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) |
Martin KaFai Lau | 206057f | 2020-01-08 16:45:51 -0800 | [diff] [blame] | 2978 | * Description |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 2979 | * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. |
Martin KaFai Lau | 206057f | 2020-01-08 16:45:51 -0800 | [diff] [blame] | 2980 | * *rcv_nxt* is the ack_seq to be sent out. |
| 2981 | * Return |
| 2982 | * 0 on success, or a negative error in case of failure. |
| 2983 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 2984 | * long bpf_send_signal_thread(u32 sig) |
Yonghong Song | 8482941 | 2020-01-14 19:50:02 -0800 | [diff] [blame] | 2985 | * Description |
| 2986 | * Send signal *sig* to the thread corresponding to the current task. |
| 2987 | * Return |
| 2988 | * 0 on success or successfully queued. |
| 2989 | * |
| 2990 | * **-EBUSY** if work queue under nmi is full. |
| 2991 | * |
| 2992 | * **-EINVAL** if *sig* is invalid. |
| 2993 | * |
| 2994 | * **-EPERM** if no permission to send the *sig*. |
| 2995 | * |
| 2996 | * **-EAGAIN** if bpf program can try again. |
Martin KaFai Lau | 5576b99 | 2020-01-22 15:36:46 -0800 | [diff] [blame] | 2997 | * |
| 2998 | * u64 bpf_jiffies64(void) |
| 2999 | * Description |
| 3000 | * Obtain the 64bit jiffies |
| 3001 | * Return |
| 3002 | * The 64 bit jiffies |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3003 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3004 | * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3005 | * Description |
| 3006 | * For an eBPF program attached to a perf event, retrieve the |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3007 | * branch records (**struct perf_branch_entry**) associated to *ctx* |
| 3008 | * and store it in the buffer pointed by *buf* up to size |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3009 | * *size* bytes. |
| 3010 | * Return |
| 3011 | * On success, number of bytes written to *buf*. On error, a |
| 3012 | * negative value. |
| 3013 | * |
| 3014 | * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3015 | * instead return the number of bytes required to store all the |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3016 | * branch entries. If this flag is set, *buf* may be NULL. |
| 3017 | * |
| 3018 | * **-EINVAL** if arguments invalid or **size** not a multiple |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3019 | * of **sizeof**\ (**struct perf_branch_entry**\ ). |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3020 | * |
| 3021 | * **-ENOENT** if architecture does not support branch records. |
Carlos Neira | b4490c5 | 2020-03-04 17:41:56 -0300 | [diff] [blame] | 3022 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3023 | * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) |
Carlos Neira | b4490c5 | 2020-03-04 17:41:56 -0300 | [diff] [blame] | 3024 | * Description |
| 3025 | * Returns 0 on success, values for *pid* and *tgid* as seen from the current |
| 3026 | * *namespace* will be returned in *nsdata*. |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3027 | * Return |
| 3028 | * 0 on success, or one of the following in case of failure: |
Carlos Neira | b4490c5 | 2020-03-04 17:41:56 -0300 | [diff] [blame] | 3029 | * |
| 3030 | * **-EINVAL** if dev and inum supplied don't match dev_t and inode number |
| 3031 | * with nsfs of current task, or if dev conversion to dev_t lost high bits. |
| 3032 | * |
| 3033 | * **-ENOENT** if pidns does not exists for the current task. |
| 3034 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3035 | * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) |
Eelco Chaudron | d831ee8 | 2020-03-06 08:59:23 +0000 | [diff] [blame] | 3036 | * Description |
| 3037 | * Write raw *data* blob into a special BPF perf event held by |
| 3038 | * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf |
| 3039 | * event must have the following attributes: **PERF_SAMPLE_RAW** |
| 3040 | * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and |
| 3041 | * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. |
| 3042 | * |
| 3043 | * The *flags* are used to indicate the index in *map* for which |
| 3044 | * the value must be put, masked with **BPF_F_INDEX_MASK**. |
| 3045 | * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** |
| 3046 | * to indicate that the index of the current CPU core should be |
| 3047 | * used. |
| 3048 | * |
| 3049 | * The value to write, of *size*, is passed through eBPF stack and |
| 3050 | * pointed by *data*. |
| 3051 | * |
| 3052 | * *ctx* is a pointer to in-kernel struct xdp_buff. |
| 3053 | * |
| 3054 | * This helper is similar to **bpf_perf_eventoutput**\ () but |
| 3055 | * restricted to raw_tracepoint bpf programs. |
| 3056 | * Return |
| 3057 | * 0 on success, or a negative error in case of failure. |
Daniel Borkmann | f318903 | 2020-03-27 16:58:52 +0100 | [diff] [blame] | 3058 | * |
| 3059 | * u64 bpf_get_netns_cookie(void *ctx) |
| 3060 | * Description |
| 3061 | * Retrieve the cookie (generated by the kernel) of the network |
| 3062 | * namespace the input *ctx* is associated with. The network |
| 3063 | * namespace cookie remains stable for its lifetime and provides |
| 3064 | * a global identifier that can be assumed unique. If *ctx* is |
| 3065 | * NULL, then the helper returns the cookie for the initial |
| 3066 | * network namespace. The cookie itself is very similar to that |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3067 | * of **bpf_get_socket_cookie**\ () helper, but for network |
| 3068 | * namespaces instead of sockets. |
Daniel Borkmann | f318903 | 2020-03-27 16:58:52 +0100 | [diff] [blame] | 3069 | * Return |
| 3070 | * A 8-byte long opaque number. |
Daniel Borkmann | 0f09abd | 2020-03-27 16:58:54 +0100 | [diff] [blame] | 3071 | * |
| 3072 | * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) |
| 3073 | * Description |
| 3074 | * Return id of cgroup v2 that is ancestor of the cgroup associated |
| 3075 | * with the current task at the *ancestor_level*. The root cgroup |
| 3076 | * is at *ancestor_level* zero and each step down the hierarchy |
| 3077 | * increments the level. If *ancestor_level* == level of cgroup |
| 3078 | * associated with the current task, then return value will be the |
| 3079 | * same as that of **bpf_get_current_cgroup_id**\ (). |
| 3080 | * |
| 3081 | * The helper is useful to implement policies based on cgroups |
| 3082 | * that are upper in hierarchy than immediate cgroup associated |
| 3083 | * with the current task. |
| 3084 | * |
| 3085 | * The format of returned id and helper limitations are same as in |
| 3086 | * **bpf_get_current_cgroup_id**\ (). |
| 3087 | * Return |
| 3088 | * The id is returned or 0 in case the id could not be retrieved. |
Joe Stringer | cf7fbe6 | 2020-03-29 15:53:38 -0700 | [diff] [blame] | 3089 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3090 | * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) |
Joe Stringer | cf7fbe6 | 2020-03-29 15:53:38 -0700 | [diff] [blame] | 3091 | * Description |
Jakub Sitnicki | e9ddbb7 | 2020-07-17 12:35:23 +0200 | [diff] [blame] | 3092 | * Helper is overloaded depending on BPF program type. This |
| 3093 | * description applies to **BPF_PROG_TYPE_SCHED_CLS** and |
| 3094 | * **BPF_PROG_TYPE_SCHED_ACT** programs. |
| 3095 | * |
Joe Stringer | cf7fbe6 | 2020-03-29 15:53:38 -0700 | [diff] [blame] | 3096 | * Assign the *sk* to the *skb*. When combined with appropriate |
| 3097 | * routing configuration to receive the packet towards the socket, |
| 3098 | * will cause *skb* to be delivered to the specified socket. |
| 3099 | * Subsequent redirection of *skb* via **bpf_redirect**\ (), |
| 3100 | * **bpf_clone_redirect**\ () or other methods outside of BPF may |
| 3101 | * interfere with successful delivery to the socket. |
| 3102 | * |
| 3103 | * This operation is only valid from TC ingress path. |
| 3104 | * |
| 3105 | * The *flags* argument must be zero. |
| 3106 | * Return |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3107 | * 0 on success, or a negative error in case of failure: |
Joe Stringer | cf7fbe6 | 2020-03-29 15:53:38 -0700 | [diff] [blame] | 3108 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3109 | * **-EINVAL** if specified *flags* are not supported. |
| 3110 | * |
| 3111 | * **-ENOENT** if the socket is unavailable for assignment. |
| 3112 | * |
| 3113 | * **-ENETUNREACH** if the socket is unreachable (wrong netns). |
| 3114 | * |
| 3115 | * **-EOPNOTSUPP** if the operation is not supported, for example |
| 3116 | * a call from outside of TC ingress. |
| 3117 | * |
| 3118 | * **-ESOCKTNOSUPPORT** if the socket type is not supported |
| 3119 | * (reuseport). |
Maciej Żenczykowski | 71d1921 | 2020-04-26 09:15:25 -0700 | [diff] [blame] | 3120 | * |
Jakub Sitnicki | e9ddbb7 | 2020-07-17 12:35:23 +0200 | [diff] [blame] | 3121 | * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) |
| 3122 | * Description |
| 3123 | * Helper is overloaded depending on BPF program type. This |
| 3124 | * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. |
| 3125 | * |
| 3126 | * Select the *sk* as a result of a socket lookup. |
| 3127 | * |
| 3128 | * For the operation to succeed passed socket must be compatible |
| 3129 | * with the packet description provided by the *ctx* object. |
| 3130 | * |
| 3131 | * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must |
| 3132 | * be an exact match. While IP family (**AF_INET** or |
| 3133 | * **AF_INET6**) must be compatible, that is IPv6 sockets |
| 3134 | * that are not v6-only can be selected for IPv4 packets. |
| 3135 | * |
| 3136 | * Only TCP listeners and UDP unconnected sockets can be |
| 3137 | * selected. *sk* can also be NULL to reset any previous |
| 3138 | * selection. |
| 3139 | * |
| 3140 | * *flags* argument can combination of following values: |
| 3141 | * |
| 3142 | * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous |
| 3143 | * socket selection, potentially done by a BPF program |
| 3144 | * that ran before us. |
| 3145 | * |
| 3146 | * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip |
| 3147 | * load-balancing within reuseport group for the socket |
| 3148 | * being selected. |
| 3149 | * |
| 3150 | * On success *ctx->sk* will point to the selected socket. |
| 3151 | * |
| 3152 | * Return |
| 3153 | * 0 on success, or a negative errno in case of failure. |
| 3154 | * |
| 3155 | * * **-EAFNOSUPPORT** if socket family (*sk->family*) is |
| 3156 | * not compatible with packet family (*ctx->family*). |
| 3157 | * |
| 3158 | * * **-EEXIST** if socket has been already selected, |
| 3159 | * potentially by another program, and |
| 3160 | * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. |
| 3161 | * |
| 3162 | * * **-EINVAL** if unsupported flags were specified. |
| 3163 | * |
| 3164 | * * **-EPROTOTYPE** if socket L4 protocol |
| 3165 | * (*sk->protocol*) doesn't match packet protocol |
| 3166 | * (*ctx->protocol*). |
| 3167 | * |
| 3168 | * * **-ESOCKTNOSUPPORT** if socket is not in allowed |
| 3169 | * state (TCP listening or UDP unconnected). |
| 3170 | * |
Maciej Żenczykowski | 71d1921 | 2020-04-26 09:15:25 -0700 | [diff] [blame] | 3171 | * u64 bpf_ktime_get_boot_ns(void) |
| 3172 | * Description |
| 3173 | * Return the time elapsed since system boot, in nanoseconds. |
| 3174 | * Does include the time the system was suspended. |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3175 | * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) |
Maciej Żenczykowski | 71d1921 | 2020-04-26 09:15:25 -0700 | [diff] [blame] | 3176 | * Return |
| 3177 | * Current *ktime*. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3178 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3179 | * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3180 | * Description |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3181 | * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print |
| 3182 | * out the format string. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3183 | * The *m* represents the seq_file. The *fmt* and *fmt_size* are for |
| 3184 | * the format string itself. The *data* and *data_len* are format string |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3185 | * arguments. The *data* are a **u64** array and corresponding format string |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3186 | * values are stored in the array. For strings and pointers where pointees |
| 3187 | * are accessed, only the pointer values are stored in the *data* array. |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3188 | * The *data_len* is the size of *data* in bytes. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3189 | * |
| 3190 | * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. |
| 3191 | * Reading kernel memory may fail due to either invalid address or |
| 3192 | * valid address but requiring a major memory fault. If reading kernel memory |
| 3193 | * fails, the string for **%s** will be an empty string, and the ip |
| 3194 | * address for **%p{i,I}{4,6}** will be 0. Not returning error to |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3195 | * bpf program is consistent with what **bpf_trace_printk**\ () does for now. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3196 | * Return |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3197 | * 0 on success, or a negative error in case of failure: |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3198 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3199 | * **-EBUSY** if per-CPU memory copy buffer is busy, can try again |
| 3200 | * by returning 1 from bpf program. |
| 3201 | * |
| 3202 | * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. |
| 3203 | * |
| 3204 | * **-E2BIG** if *fmt* contains too many format specifiers. |
| 3205 | * |
| 3206 | * **-EOVERFLOW** if an overflow happened: The same object will be tried again. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3207 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3208 | * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3209 | * Description |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3210 | * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3211 | * The *m* represents the seq_file. The *data* and *len* represent the |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3212 | * data to write in bytes. |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3213 | * Return |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3214 | * 0 on success, or a negative error in case of failure: |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3215 | * |
Quentin Monnet | ab8d780 | 2020-05-11 17:15:35 +0100 | [diff] [blame] | 3216 | * **-EOVERFLOW** if an overflow happened: The same object will be tried again. |
Andrey Ignatov | f307fa2 | 2020-05-14 13:03:47 -0700 | [diff] [blame] | 3217 | * |
| 3218 | * u64 bpf_sk_cgroup_id(struct bpf_sock *sk) |
| 3219 | * Description |
| 3220 | * Return the cgroup v2 id of the socket *sk*. |
| 3221 | * |
| 3222 | * *sk* must be a non-**NULL** pointer to a full socket, e.g. one |
| 3223 | * returned from **bpf_sk_lookup_xxx**\ (), |
| 3224 | * **bpf_sk_fullsock**\ (), etc. The format of returned id is |
| 3225 | * same as in **bpf_skb_cgroup_id**\ (). |
| 3226 | * |
| 3227 | * This helper is available only if the kernel was compiled with |
| 3228 | * the **CONFIG_SOCK_CGROUP_DATA** configuration option. |
| 3229 | * Return |
| 3230 | * The id is returned or 0 in case the id could not be retrieved. |
| 3231 | * |
| 3232 | * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level) |
| 3233 | * Description |
| 3234 | * Return id of cgroup v2 that is ancestor of cgroup associated |
| 3235 | * with the *sk* at the *ancestor_level*. The root cgroup is at |
| 3236 | * *ancestor_level* zero and each step down the hierarchy |
| 3237 | * increments the level. If *ancestor_level* == level of cgroup |
| 3238 | * associated with *sk*, then return value will be same as that |
| 3239 | * of **bpf_sk_cgroup_id**\ (). |
| 3240 | * |
| 3241 | * The helper is useful to implement policies based on cgroups |
| 3242 | * that are upper in hierarchy than immediate cgroup associated |
| 3243 | * with *sk*. |
| 3244 | * |
| 3245 | * The format of returned id and helper limitations are same as in |
| 3246 | * **bpf_sk_cgroup_id**\ (). |
| 3247 | * Return |
| 3248 | * The id is returned or 0 in case the id could not be retrieved. |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3249 | * |
Andrii Nakryiko | e1613b5 | 2020-07-27 15:47:15 -0700 | [diff] [blame] | 3250 | * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3251 | * Description |
| 3252 | * Copy *size* bytes from *data* into a ring buffer *ringbuf*. |
Quentin Monnet | bcc7f55 | 2020-06-23 16:39:35 +0100 | [diff] [blame] | 3253 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification |
| 3254 | * of new data availability is sent. |
| 3255 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification |
| 3256 | * of new data availability is sent unconditionally. |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3257 | * Return |
Quentin Monnet | bcc7f55 | 2020-06-23 16:39:35 +0100 | [diff] [blame] | 3258 | * 0 on success, or a negative error in case of failure. |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3259 | * |
| 3260 | * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) |
| 3261 | * Description |
| 3262 | * Reserve *size* bytes of payload in a ring buffer *ringbuf*. |
| 3263 | * Return |
| 3264 | * Valid pointer with *size* bytes of memory available; NULL, |
| 3265 | * otherwise. |
| 3266 | * |
| 3267 | * void bpf_ringbuf_submit(void *data, u64 flags) |
| 3268 | * Description |
| 3269 | * Submit reserved ring buffer sample, pointed to by *data*. |
Quentin Monnet | bcc7f55 | 2020-06-23 16:39:35 +0100 | [diff] [blame] | 3270 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification |
| 3271 | * of new data availability is sent. |
| 3272 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification |
| 3273 | * of new data availability is sent unconditionally. |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3274 | * Return |
| 3275 | * Nothing. Always succeeds. |
| 3276 | * |
| 3277 | * void bpf_ringbuf_discard(void *data, u64 flags) |
| 3278 | * Description |
| 3279 | * Discard reserved ring buffer sample, pointed to by *data*. |
Quentin Monnet | bcc7f55 | 2020-06-23 16:39:35 +0100 | [diff] [blame] | 3280 | * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification |
| 3281 | * of new data availability is sent. |
| 3282 | * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification |
| 3283 | * of new data availability is sent unconditionally. |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3284 | * Return |
| 3285 | * Nothing. Always succeeds. |
| 3286 | * |
| 3287 | * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) |
| 3288 | * Description |
| 3289 | * Query various characteristics of provided ring buffer. What |
| 3290 | * exactly is queries is determined by *flags*: |
Quentin Monnet | bcc7f55 | 2020-06-23 16:39:35 +0100 | [diff] [blame] | 3291 | * |
| 3292 | * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. |
| 3293 | * * **BPF_RB_RING_SIZE**: The size of ring buffer. |
| 3294 | * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). |
| 3295 | * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). |
| 3296 | * |
| 3297 | * Data returned is just a momentary snapshot of actual values |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3298 | * and could be inaccurate, so this facility should be used to |
| 3299 | * power heuristics and for reporting, not to make 100% correct |
| 3300 | * calculation. |
| 3301 | * Return |
Quentin Monnet | bcc7f55 | 2020-06-23 16:39:35 +0100 | [diff] [blame] | 3302 | * Requested value, or 0, if *flags* are not recognized. |
Daniel Borkmann | 7cdec54 | 2020-06-02 16:58:33 +0200 | [diff] [blame] | 3303 | * |
Andrii Nakryiko | bdb7b79 | 2020-06-22 20:22:21 -0700 | [diff] [blame] | 3304 | * long bpf_csum_level(struct sk_buff *skb, u64 level) |
Daniel Borkmann | 7cdec54 | 2020-06-02 16:58:33 +0200 | [diff] [blame] | 3305 | * Description |
| 3306 | * Change the skbs checksum level by one layer up or down, or |
| 3307 | * reset it entirely to none in order to have the stack perform |
| 3308 | * checksum validation. The level is applicable to the following |
| 3309 | * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of |
| 3310 | * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | |
| 3311 | * through **bpf_skb_adjust_room**\ () helper with passing in |
| 3312 | * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call |
| 3313 | * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since |
| 3314 | * the UDP header is removed. Similarly, an encap of the latter |
| 3315 | * into the former could be accompanied by a helper call to |
| 3316 | * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the |
| 3317 | * skb is still intended to be processed in higher layers of the |
| 3318 | * stack instead of just egressing at tc. |
| 3319 | * |
| 3320 | * There are three supported level settings at this time: |
| 3321 | * |
| 3322 | * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs |
| 3323 | * with CHECKSUM_UNNECESSARY. |
| 3324 | * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs |
| 3325 | * with CHECKSUM_UNNECESSARY. |
| 3326 | * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and |
| 3327 | * sets CHECKSUM_NONE to force checksum validation by the stack. |
| 3328 | * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current |
| 3329 | * skb->csum_level. |
| 3330 | * Return |
| 3331 | * 0 on success, or a negative error in case of failure. In the |
| 3332 | * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level |
| 3333 | * is returned or the error code -EACCES in case the skb is not |
| 3334 | * subject to CHECKSUM_UNNECESSARY. |
Yonghong Song | af7ec13 | 2020-06-23 16:08:09 -0700 | [diff] [blame] | 3335 | * |
| 3336 | * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) |
| 3337 | * Description |
| 3338 | * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. |
| 3339 | * Return |
| 3340 | * *sk* if casting is valid, or NULL otherwise. |
Yonghong Song | 478cfbd | 2020-06-23 16:08:11 -0700 | [diff] [blame] | 3341 | * |
| 3342 | * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) |
| 3343 | * Description |
| 3344 | * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. |
| 3345 | * Return |
| 3346 | * *sk* if casting is valid, or NULL otherwise. |
| 3347 | * |
| 3348 | * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) |
| 3349 | * Description |
| 3350 | * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. |
| 3351 | * Return |
| 3352 | * *sk* if casting is valid, or NULL otherwise. |
| 3353 | * |
| 3354 | * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) |
| 3355 | * Description |
| 3356 | * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. |
| 3357 | * Return |
| 3358 | * *sk* if casting is valid, or NULL otherwise. |
Yonghong Song | 0d4fad3 | 2020-06-23 16:08:15 -0700 | [diff] [blame] | 3359 | * |
| 3360 | * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) |
| 3361 | * Description |
| 3362 | * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. |
| 3363 | * Return |
| 3364 | * *sk* if casting is valid, or NULL otherwise. |
Song Liu | fa28dcb | 2020-06-29 23:28:44 -0700 | [diff] [blame] | 3365 | * |
| 3366 | * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) |
| 3367 | * Description |
| 3368 | * Return a user or a kernel stack in bpf program provided buffer. |
| 3369 | * To achieve this, the helper needs *task*, which is a valid |
| 3370 | * pointer to struct task_struct. To store the stacktrace, the |
| 3371 | * bpf program provides *buf* with a nonnegative *size*. |
| 3372 | * |
| 3373 | * The last argument, *flags*, holds the number of stack frames to |
| 3374 | * skip (from 0 to 255), masked with |
| 3375 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set |
| 3376 | * the following flags: |
| 3377 | * |
| 3378 | * **BPF_F_USER_STACK** |
| 3379 | * Collect a user space stack instead of a kernel stack. |
| 3380 | * **BPF_F_USER_BUILD_ID** |
| 3381 | * Collect buildid+offset instead of ips for user stack, |
| 3382 | * only valid if **BPF_F_USER_STACK** is also specified. |
| 3383 | * |
| 3384 | * **bpf_get_task_stack**\ () can collect up to |
| 3385 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject |
| 3386 | * to sufficient large buffer size. Note that |
| 3387 | * this limit can be controlled with the **sysctl** program, and |
| 3388 | * that it should be manually increased in order to profile long |
| 3389 | * user stacks (such as stacks for Java programs). To do so, use: |
| 3390 | * |
| 3391 | * :: |
| 3392 | * |
| 3393 | * # sysctl kernel.perf_event_max_stack=<new value> |
| 3394 | * Return |
| 3395 | * A non-negative value equal to or less than *size* on success, |
| 3396 | * or a negative error in case of failure. |
| 3397 | * |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 3398 | */ |
| 3399 | #define __BPF_FUNC_MAPPER(FN) \ |
| 3400 | FN(unspec), \ |
| 3401 | FN(map_lookup_elem), \ |
| 3402 | FN(map_update_elem), \ |
| 3403 | FN(map_delete_elem), \ |
| 3404 | FN(probe_read), \ |
| 3405 | FN(ktime_get_ns), \ |
| 3406 | FN(trace_printk), \ |
| 3407 | FN(get_prandom_u32), \ |
| 3408 | FN(get_smp_processor_id), \ |
| 3409 | FN(skb_store_bytes), \ |
| 3410 | FN(l3_csum_replace), \ |
| 3411 | FN(l4_csum_replace), \ |
| 3412 | FN(tail_call), \ |
| 3413 | FN(clone_redirect), \ |
| 3414 | FN(get_current_pid_tgid), \ |
| 3415 | FN(get_current_uid_gid), \ |
| 3416 | FN(get_current_comm), \ |
| 3417 | FN(get_cgroup_classid), \ |
| 3418 | FN(skb_vlan_push), \ |
| 3419 | FN(skb_vlan_pop), \ |
| 3420 | FN(skb_get_tunnel_key), \ |
| 3421 | FN(skb_set_tunnel_key), \ |
| 3422 | FN(perf_event_read), \ |
| 3423 | FN(redirect), \ |
| 3424 | FN(get_route_realm), \ |
| 3425 | FN(perf_event_output), \ |
| 3426 | FN(skb_load_bytes), \ |
| 3427 | FN(get_stackid), \ |
| 3428 | FN(csum_diff), \ |
| 3429 | FN(skb_get_tunnel_opt), \ |
| 3430 | FN(skb_set_tunnel_opt), \ |
| 3431 | FN(skb_change_proto), \ |
| 3432 | FN(skb_change_type), \ |
| 3433 | FN(skb_under_cgroup), \ |
| 3434 | FN(get_hash_recalc), \ |
| 3435 | FN(get_current_task), \ |
| 3436 | FN(probe_write_user), \ |
| 3437 | FN(current_task_under_cgroup), \ |
| 3438 | FN(skb_change_tail), \ |
| 3439 | FN(skb_pull_data), \ |
| 3440 | FN(csum_update), \ |
| 3441 | FN(set_hash_invalid), \ |
Thomas Graf | 3a0af8f | 2016-11-30 17:10:10 +0100 | [diff] [blame] | 3442 | FN(get_numa_node_id), \ |
Martin KaFai Lau | 17bedab | 2016-12-07 15:53:11 -0800 | [diff] [blame] | 3443 | FN(skb_change_head), \ |
Gianluca Borello | a5e8c07 | 2017-01-18 17:55:49 +0000 | [diff] [blame] | 3444 | FN(xdp_adjust_head), \ |
Chenbo Feng | 91b8270 | 2017-03-22 17:27:34 -0700 | [diff] [blame] | 3445 | FN(probe_read_str), \ |
Chenbo Feng | 6acc5c2 | 2017-03-22 17:27:35 -0700 | [diff] [blame] | 3446 | FN(get_socket_cookie), \ |
Daniel Borkmann | ded092c | 2017-06-11 00:50:47 +0200 | [diff] [blame] | 3447 | FN(get_socket_uid), \ |
Lawrence Brakmo | 8c4b4c7 | 2017-06-30 20:02:46 -0700 | [diff] [blame] | 3448 | FN(set_hash), \ |
Daniel Borkmann | 2be7e21 | 2017-07-02 02:13:26 +0200 | [diff] [blame] | 3449 | FN(setsockopt), \ |
John Fastabend | 97f91a7 | 2017-07-17 09:29:18 -0700 | [diff] [blame] | 3450 | FN(skb_adjust_room), \ |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 3451 | FN(redirect_map), \ |
| 3452 | FN(sk_redirect_map), \ |
| 3453 | FN(sock_map_update), \ |
Yonghong Song | 908432c | 2017-10-05 09:19:20 -0700 | [diff] [blame] | 3454 | FN(xdp_adjust_meta), \ |
Yonghong Song | 4bebdc7 | 2017-10-05 09:19:22 -0700 | [diff] [blame] | 3455 | FN(perf_event_read_value), \ |
Lawrence Brakmo | cd86d1f | 2017-10-20 11:05:40 -0700 | [diff] [blame] | 3456 | FN(perf_prog_read_value), \ |
Josef Bacik | 9802d86 | 2017-12-11 11:36:48 -0500 | [diff] [blame] | 3457 | FN(getsockopt), \ |
Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 3458 | FN(override_return), \ |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 3459 | FN(sock_ops_cb_flags_set), \ |
John Fastabend | 2a10031 | 2018-03-18 12:57:15 -0700 | [diff] [blame] | 3460 | FN(msg_redirect_map), \ |
John Fastabend | 91843d5 | 2018-03-18 12:57:20 -0700 | [diff] [blame] | 3461 | FN(msg_apply_bytes), \ |
John Fastabend | 015632b | 2018-03-18 12:57:25 -0700 | [diff] [blame] | 3462 | FN(msg_cork_bytes), \ |
Andrey Ignatov | d74bad4 | 2018-03-30 15:08:05 -0700 | [diff] [blame] | 3463 | FN(msg_pull_data), \ |
Nikita V. Shirokov | b32cc5b | 2018-04-17 21:42:13 -0700 | [diff] [blame] | 3464 | FN(bind), \ |
Eyal Birger | 12bed76 | 2018-04-24 17:50:29 +0300 | [diff] [blame] | 3465 | FN(xdp_adjust_tail), \ |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 3466 | FN(skb_get_xfrm_state), \ |
Daniel Borkmann | 4e1ec56 | 2018-05-04 01:08:15 +0200 | [diff] [blame] | 3467 | FN(get_stack), \ |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 3468 | FN(skb_load_bytes_relative), \ |
John Fastabend | 8111038 | 2018-05-14 10:00:17 -0700 | [diff] [blame] | 3469 | FN(fib_lookup), \ |
| 3470 | FN(sock_hash_update), \ |
| 3471 | FN(msg_redirect_hash), \ |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 3472 | FN(sk_redirect_hash), \ |
| 3473 | FN(lwt_push_encap), \ |
| 3474 | FN(lwt_seg6_store_bytes), \ |
| 3475 | FN(lwt_seg6_adjust_srh), \ |
Sean Young | f4364dc | 2018-05-27 12:24:09 +0100 | [diff] [blame] | 3476 | FN(lwt_seg6_action), \ |
| 3477 | FN(rc_repeat), \ |
Daniel Borkmann | cb20b08 | 2018-06-02 23:06:36 +0200 | [diff] [blame] | 3478 | FN(rc_keydown), \ |
Yonghong Song | bf6fa2c | 2018-06-03 15:59:41 -0700 | [diff] [blame] | 3479 | FN(skb_cgroup_id), \ |
Roman Gushchin | cd33943 | 2018-08-02 14:27:24 -0700 | [diff] [blame] | 3480 | FN(get_current_cgroup_id), \ |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 3481 | FN(get_local_storage), \ |
Andrey Ignatov | 7723628 | 2018-08-12 10:49:27 -0700 | [diff] [blame] | 3482 | FN(sk_select_reuseport), \ |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 3483 | FN(skb_ancestor_cgroup_id), \ |
| 3484 | FN(sk_lookup_tcp), \ |
| 3485 | FN(sk_lookup_udp), \ |
Mauricio Vasquez B | f1a2e44 | 2018-10-18 15:16:25 +0200 | [diff] [blame] | 3486 | FN(sk_release), \ |
| 3487 | FN(map_push_elem), \ |
| 3488 | FN(map_pop_elem), \ |
John Fastabend | 6fff607 | 2018-10-19 19:56:49 -0700 | [diff] [blame] | 3489 | FN(map_peek_elem), \ |
John Fastabend | 7246d8e | 2018-11-26 14:16:17 -0800 | [diff] [blame] | 3490 | FN(msg_push_data), \ |
Sean Young | 01d3240 | 2018-12-06 13:01:03 +0000 | [diff] [blame] | 3491 | FN(msg_pop_data), \ |
Alexei Starovoitov | d83525c | 2019-01-31 15:40:04 -0800 | [diff] [blame] | 3492 | FN(rc_pointer_rel), \ |
| 3493 | FN(spin_lock), \ |
Martin KaFai Lau | 46f8bc9 | 2019-02-09 23:22:20 -0800 | [diff] [blame] | 3494 | FN(spin_unlock), \ |
Martin KaFai Lau | 655a51e | 2019-02-09 23:22:24 -0800 | [diff] [blame] | 3495 | FN(sk_fullsock), \ |
brakmo | f7c917b | 2019-03-01 12:38:46 -0800 | [diff] [blame] | 3496 | FN(tcp_sock), \ |
Martin KaFai Lau | dbafd7d | 2019-03-12 10:23:04 -0700 | [diff] [blame] | 3497 | FN(skb_ecn_set_ce), \ |
Lorenz Bauer | edbf8c0 | 2019-03-22 09:54:01 +0800 | [diff] [blame] | 3498 | FN(get_listener_sock), \ |
Lorenz Bauer | 3990408 | 2019-03-22 09:54:02 +0800 | [diff] [blame] | 3499 | FN(skc_lookup_tcp), \ |
Andrey Ignatov | 808649f | 2019-02-27 13:28:48 -0800 | [diff] [blame] | 3500 | FN(tcp_check_syncookie), \ |
Andrey Ignatov | 1d11b30 | 2019-02-28 19:22:15 -0800 | [diff] [blame] | 3501 | FN(sysctl_get_name), \ |
Andrey Ignatov | 4e63acd | 2019-03-07 18:38:43 -0800 | [diff] [blame] | 3502 | FN(sysctl_get_current_value), \ |
| 3503 | FN(sysctl_get_new_value), \ |
Andrey Ignatov | d7a4cb9 | 2019-03-18 17:55:26 -0700 | [diff] [blame] | 3504 | FN(sysctl_set_new_value), \ |
| 3505 | FN(strtol), \ |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 3506 | FN(strtoul), \ |
| 3507 | FN(sk_storage_get), \ |
Yonghong Song | 8b401f9 | 2019-05-23 14:47:45 -0700 | [diff] [blame] | 3508 | FN(sk_storage_delete), \ |
Petar Penkov | 70d6624 | 2019-07-29 09:59:15 -0700 | [diff] [blame] | 3509 | FN(send_signal), \ |
Alexei Starovoitov | a7658e1 | 2019-10-15 20:25:04 -0700 | [diff] [blame] | 3510 | FN(tcp_gen_syncookie), \ |
Daniel Borkmann | 6ae08ae | 2019-11-02 00:17:59 +0100 | [diff] [blame] | 3511 | FN(skb_output), \ |
| 3512 | FN(probe_read_user), \ |
| 3513 | FN(probe_read_kernel), \ |
| 3514 | FN(probe_read_user_str), \ |
Martin KaFai Lau | 206057f | 2020-01-08 16:45:51 -0800 | [diff] [blame] | 3515 | FN(probe_read_kernel_str), \ |
Yonghong Song | 8482941 | 2020-01-14 19:50:02 -0800 | [diff] [blame] | 3516 | FN(tcp_send_ack), \ |
Martin KaFai Lau | 5576b99 | 2020-01-22 15:36:46 -0800 | [diff] [blame] | 3517 | FN(send_signal_thread), \ |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3518 | FN(jiffies64), \ |
Carlos Neira | b4490c5 | 2020-03-04 17:41:56 -0300 | [diff] [blame] | 3519 | FN(read_branch_records), \ |
Eelco Chaudron | d831ee8 | 2020-03-06 08:59:23 +0000 | [diff] [blame] | 3520 | FN(get_ns_current_pid_tgid), \ |
Daniel Borkmann | f318903 | 2020-03-27 16:58:52 +0100 | [diff] [blame] | 3521 | FN(xdp_output), \ |
Daniel Borkmann | 0f09abd | 2020-03-27 16:58:54 +0100 | [diff] [blame] | 3522 | FN(get_netns_cookie), \ |
Joe Stringer | cf7fbe6 | 2020-03-29 15:53:38 -0700 | [diff] [blame] | 3523 | FN(get_current_ancestor_cgroup_id), \ |
Maciej Żenczykowski | 71d1921 | 2020-04-26 09:15:25 -0700 | [diff] [blame] | 3524 | FN(sk_assign), \ |
Yonghong Song | 492e639 | 2020-05-09 10:59:14 -0700 | [diff] [blame] | 3525 | FN(ktime_get_boot_ns), \ |
| 3526 | FN(seq_printf), \ |
Andrey Ignatov | f307fa2 | 2020-05-14 13:03:47 -0700 | [diff] [blame] | 3527 | FN(seq_write), \ |
| 3528 | FN(sk_cgroup_id), \ |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3529 | FN(sk_ancestor_cgroup_id), \ |
| 3530 | FN(ringbuf_output), \ |
| 3531 | FN(ringbuf_reserve), \ |
| 3532 | FN(ringbuf_submit), \ |
| 3533 | FN(ringbuf_discard), \ |
Daniel Borkmann | 7cdec54 | 2020-06-02 16:58:33 +0200 | [diff] [blame] | 3534 | FN(ringbuf_query), \ |
Yonghong Song | af7ec13 | 2020-06-23 16:08:09 -0700 | [diff] [blame] | 3535 | FN(csum_level), \ |
Yonghong Song | 478cfbd | 2020-06-23 16:08:11 -0700 | [diff] [blame] | 3536 | FN(skc_to_tcp6_sock), \ |
| 3537 | FN(skc_to_tcp_sock), \ |
| 3538 | FN(skc_to_tcp_timewait_sock), \ |
Yonghong Song | 0d4fad3 | 2020-06-23 16:08:15 -0700 | [diff] [blame] | 3539 | FN(skc_to_tcp_request_sock), \ |
Song Liu | fa28dcb | 2020-06-29 23:28:44 -0700 | [diff] [blame] | 3540 | FN(skc_to_udp6_sock), \ |
| 3541 | FN(get_task_stack), \ |
| 3542 | /* */ |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 3543 | |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 3544 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
| 3545 | * function eBPF program intends to call |
| 3546 | */ |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 3547 | #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 3548 | enum bpf_func_id { |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 3549 | __BPF_FUNC_MAPPER(__BPF_ENUM_FN) |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 3550 | __BPF_FUNC_MAX_ID, |
| 3551 | }; |
Thomas Graf | ebb676d | 2016-10-27 11:23:51 +0200 | [diff] [blame] | 3552 | #undef __BPF_ENUM_FN |
Alexei Starovoitov | 09756af | 2014-09-26 00:17:00 -0700 | [diff] [blame] | 3553 | |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 3554 | /* All flags used by eBPF helper functions, placed here. */ |
| 3555 | |
| 3556 | /* BPF_FUNC_skb_store_bytes flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3557 | enum { |
| 3558 | BPF_F_RECOMPUTE_CSUM = (1ULL << 0), |
| 3559 | BPF_F_INVALIDATE_HASH = (1ULL << 1), |
| 3560 | }; |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 3561 | |
| 3562 | /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. |
| 3563 | * First 4 bits are for passing the header field size. |
| 3564 | */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3565 | enum { |
| 3566 | BPF_F_HDR_FIELD_MASK = 0xfULL, |
| 3567 | }; |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 3568 | |
| 3569 | /* BPF_FUNC_l4_csum_replace flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3570 | enum { |
| 3571 | BPF_F_PSEUDO_HDR = (1ULL << 4), |
| 3572 | BPF_F_MARK_MANGLED_0 = (1ULL << 5), |
| 3573 | BPF_F_MARK_ENFORCE = (1ULL << 6), |
| 3574 | }; |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 3575 | |
| 3576 | /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3577 | enum { |
| 3578 | BPF_F_INGRESS = (1ULL << 0), |
| 3579 | }; |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 3580 | |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 3581 | /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3582 | enum { |
| 3583 | BPF_F_TUNINFO_IPV6 = (1ULL << 0), |
| 3584 | }; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 3585 | |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 3586 | /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3587 | enum { |
| 3588 | BPF_F_SKIP_FIELD_MASK = 0xffULL, |
| 3589 | BPF_F_USER_STACK = (1ULL << 8), |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 3590 | /* flags used by BPF_FUNC_get_stackid only. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3591 | BPF_F_FAST_STACK_CMP = (1ULL << 9), |
| 3592 | BPF_F_REUSE_STACKID = (1ULL << 10), |
Yonghong Song | c195651e | 2018-04-28 22:28:08 -0700 | [diff] [blame] | 3593 | /* flags used by BPF_FUNC_get_stack only. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3594 | BPF_F_USER_BUILD_ID = (1ULL << 11), |
| 3595 | }; |
Alexei Starovoitov | d5a3b1f | 2016-02-17 19:58:58 -0800 | [diff] [blame] | 3596 | |
Daniel Borkmann | 2da897e | 2016-02-23 02:05:26 +0100 | [diff] [blame] | 3597 | /* BPF_FUNC_skb_set_tunnel_key flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3598 | enum { |
| 3599 | BPF_F_ZERO_CSUM_TX = (1ULL << 1), |
| 3600 | BPF_F_DONT_FRAGMENT = (1ULL << 2), |
| 3601 | BPF_F_SEQ_NUMBER = (1ULL << 3), |
| 3602 | }; |
Daniel Borkmann | 2da897e | 2016-02-23 02:05:26 +0100 | [diff] [blame] | 3603 | |
Yonghong Song | 908432c | 2017-10-05 09:19:20 -0700 | [diff] [blame] | 3604 | /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and |
| 3605 | * BPF_FUNC_perf_event_read_value flags. |
| 3606 | */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3607 | enum { |
| 3608 | BPF_F_INDEX_MASK = 0xffffffffULL, |
| 3609 | BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 3610 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3611 | BPF_F_CTXLEN_MASK = (0xfffffULL << 32), |
| 3612 | }; |
Daniel Borkmann | 1e33759 | 2016-04-18 21:01:23 +0200 | [diff] [blame] | 3613 | |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 3614 | /* Current network namespace */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3615 | enum { |
| 3616 | BPF_F_CURRENT_NETNS = (-1L), |
| 3617 | }; |
Joe Stringer | f71c614 | 2018-11-30 15:32:20 -0800 | [diff] [blame] | 3618 | |
Daniel Borkmann | 7cdec54 | 2020-06-02 16:58:33 +0200 | [diff] [blame] | 3619 | /* BPF_FUNC_csum_level level values. */ |
| 3620 | enum { |
| 3621 | BPF_CSUM_LEVEL_QUERY, |
| 3622 | BPF_CSUM_LEVEL_INC, |
| 3623 | BPF_CSUM_LEVEL_DEC, |
| 3624 | BPF_CSUM_LEVEL_RESET, |
| 3625 | }; |
| 3626 | |
Willem de Bruijn | 2278f6c | 2019-03-22 14:32:55 -0400 | [diff] [blame] | 3627 | /* BPF_FUNC_skb_adjust_room flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3628 | enum { |
| 3629 | BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), |
| 3630 | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), |
| 3631 | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), |
| 3632 | BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), |
| 3633 | BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), |
Daniel Borkmann | 836e66c | 2020-06-02 16:58:32 +0200 | [diff] [blame] | 3634 | BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3635 | }; |
Willem de Bruijn | 2278f6c | 2019-03-22 14:32:55 -0400 | [diff] [blame] | 3636 | |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3637 | enum { |
| 3638 | BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, |
| 3639 | BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, |
| 3640 | }; |
Alan Maguire | 58dfc90 | 2019-04-09 15:06:41 +0100 | [diff] [blame] | 3641 | |
Alan Maguire | bfb35c2 | 2019-04-12 12:27:34 +0100 | [diff] [blame] | 3642 | #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ |
Alan Maguire | 58dfc90 | 2019-04-09 15:06:41 +0100 | [diff] [blame] | 3643 | BPF_ADJ_ROOM_ENCAP_L2_MASK) \ |
| 3644 | << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) |
Willem de Bruijn | 868d523 | 2019-03-22 14:32:56 -0400 | [diff] [blame] | 3645 | |
Andrey Ignatov | 808649f | 2019-02-27 13:28:48 -0800 | [diff] [blame] | 3646 | /* BPF_FUNC_sysctl_get_name flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3647 | enum { |
| 3648 | BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), |
| 3649 | }; |
Andrey Ignatov | 808649f | 2019-02-27 13:28:48 -0800 | [diff] [blame] | 3650 | |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 3651 | /* BPF_FUNC_sk_storage_get flags */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3652 | enum { |
| 3653 | BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0), |
| 3654 | }; |
Martin KaFai Lau | 6ac99e8 | 2019-04-26 16:39:39 -0700 | [diff] [blame] | 3655 | |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3656 | /* BPF_FUNC_read_branch_records flags. */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 3657 | enum { |
| 3658 | BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), |
| 3659 | }; |
Daniel Xu | fff7b64 | 2020-02-17 19:04:31 -0800 | [diff] [blame] | 3660 | |
Andrii Nakryiko | 457f443 | 2020-05-29 00:54:20 -0700 | [diff] [blame] | 3661 | /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and |
| 3662 | * BPF_FUNC_bpf_ringbuf_output flags. |
| 3663 | */ |
| 3664 | enum { |
| 3665 | BPF_RB_NO_WAKEUP = (1ULL << 0), |
| 3666 | BPF_RB_FORCE_WAKEUP = (1ULL << 1), |
| 3667 | }; |
| 3668 | |
| 3669 | /* BPF_FUNC_bpf_ringbuf_query flags */ |
| 3670 | enum { |
| 3671 | BPF_RB_AVAIL_DATA = 0, |
| 3672 | BPF_RB_RING_SIZE = 1, |
| 3673 | BPF_RB_CONS_POS = 2, |
| 3674 | BPF_RB_PROD_POS = 3, |
| 3675 | }; |
| 3676 | |
| 3677 | /* BPF ring buffer constants */ |
| 3678 | enum { |
| 3679 | BPF_RINGBUF_BUSY_BIT = (1U << 31), |
| 3680 | BPF_RINGBUF_DISCARD_BIT = (1U << 30), |
| 3681 | BPF_RINGBUF_HDR_SZ = 8, |
| 3682 | }; |
| 3683 | |
Jakub Sitnicki | e9ddbb7 | 2020-07-17 12:35:23 +0200 | [diff] [blame] | 3684 | /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ |
| 3685 | enum { |
| 3686 | BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), |
| 3687 | BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), |
| 3688 | }; |
| 3689 | |
Daniel Borkmann | 2be7e21 | 2017-07-02 02:13:26 +0200 | [diff] [blame] | 3690 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ |
| 3691 | enum bpf_adj_room_mode { |
| 3692 | BPF_ADJ_ROOM_NET, |
Willem de Bruijn | 14aa319 | 2019-03-22 14:32:54 -0400 | [diff] [blame] | 3693 | BPF_ADJ_ROOM_MAC, |
Daniel Borkmann | 2be7e21 | 2017-07-02 02:13:26 +0200 | [diff] [blame] | 3694 | }; |
| 3695 | |
Daniel Borkmann | 4e1ec56 | 2018-05-04 01:08:15 +0200 | [diff] [blame] | 3696 | /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ |
| 3697 | enum bpf_hdr_start_off { |
| 3698 | BPF_HDR_START_MAC, |
| 3699 | BPF_HDR_START_NET, |
| 3700 | }; |
| 3701 | |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 3702 | /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ |
| 3703 | enum bpf_lwt_encap_mode { |
| 3704 | BPF_LWT_ENCAP_SEG6, |
Peter Oskolkov | 3e0bd37 | 2019-02-13 11:53:35 -0800 | [diff] [blame] | 3705 | BPF_LWT_ENCAP_SEG6_INLINE, |
| 3706 | BPF_LWT_ENCAP_IP, |
Mathieu Xhonneux | fe94cc2 | 2018-05-20 14:58:14 +0100 | [diff] [blame] | 3707 | }; |
| 3708 | |
Daniel Borkmann | b7df9ada | 2018-12-01 01:18:53 +0100 | [diff] [blame] | 3709 | #define __bpf_md_ptr(type, name) \ |
| 3710 | union { \ |
| 3711 | type name; \ |
| 3712 | __u64 :64; \ |
| 3713 | } __attribute__((aligned(8))) |
| 3714 | |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 3715 | /* user accessible mirror of in-kernel sk_buff. |
| 3716 | * new fields can only be added to the end of this structure |
| 3717 | */ |
| 3718 | struct __sk_buff { |
| 3719 | __u32 len; |
| 3720 | __u32 pkt_type; |
| 3721 | __u32 mark; |
| 3722 | __u32 queue_mapping; |
Alexei Starovoitov | c249739 | 2015-03-16 18:06:02 -0700 | [diff] [blame] | 3723 | __u32 protocol; |
| 3724 | __u32 vlan_present; |
| 3725 | __u32 vlan_tci; |
Michal Sekletar | 27cd545 | 2015-03-24 14:48:41 +0100 | [diff] [blame] | 3726 | __u32 vlan_proto; |
Daniel Borkmann | bcad571 | 2015-04-03 20:52:24 +0200 | [diff] [blame] | 3727 | __u32 priority; |
Alexei Starovoitov | 37e82c2 | 2015-05-27 15:30:39 -0700 | [diff] [blame] | 3728 | __u32 ingress_ifindex; |
| 3729 | __u32 ifindex; |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 3730 | __u32 tc_index; |
| 3731 | __u32 cb[5]; |
Daniel Borkmann | ba7591d | 2015-08-01 00:46:29 +0200 | [diff] [blame] | 3732 | __u32 hash; |
Daniel Borkmann | 045efa8 | 2015-09-15 23:05:42 -0700 | [diff] [blame] | 3733 | __u32 tc_classid; |
Alexei Starovoitov | 969bf05 | 2016-05-05 19:49:10 -0700 | [diff] [blame] | 3734 | __u32 data; |
| 3735 | __u32 data_end; |
Daniel Borkmann | b1d9fc4 | 2017-04-19 23:01:17 +0200 | [diff] [blame] | 3736 | __u32 napi_id; |
John Fastabend | 8a31db5 | 2017-08-15 22:33:09 -0700 | [diff] [blame] | 3737 | |
Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 3738 | /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ |
John Fastabend | 8a31db5 | 2017-08-15 22:33:09 -0700 | [diff] [blame] | 3739 | __u32 family; |
| 3740 | __u32 remote_ip4; /* Stored in network byte order */ |
| 3741 | __u32 local_ip4; /* Stored in network byte order */ |
| 3742 | __u32 remote_ip6[4]; /* Stored in network byte order */ |
| 3743 | __u32 local_ip6[4]; /* Stored in network byte order */ |
| 3744 | __u32 remote_port; /* Stored in network byte order */ |
| 3745 | __u32 local_port; /* stored in host byte order */ |
Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 3746 | /* ... here. */ |
| 3747 | |
| 3748 | __u32 data_meta; |
Daniel Borkmann | b7df9ada | 2018-12-01 01:18:53 +0100 | [diff] [blame] | 3749 | __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); |
Vlad Dumitrescu | f11216b | 2018-11-22 14:39:16 -0500 | [diff] [blame] | 3750 | __u64 tstamp; |
Petar Penkov | e3da08d | 2018-12-02 20:18:19 -0500 | [diff] [blame] | 3751 | __u32 wire_len; |
Eric Dumazet | d9ff286 | 2019-01-23 09:22:27 -0800 | [diff] [blame] | 3752 | __u32 gso_segs; |
Martin KaFai Lau | 46f8bc9 | 2019-02-09 23:22:20 -0800 | [diff] [blame] | 3753 | __bpf_md_ptr(struct bpf_sock *, sk); |
Willem de Bruijn | cf62089 | 2020-03-03 15:05:01 -0500 | [diff] [blame] | 3754 | __u32 gso_size; |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 3755 | }; |
| 3756 | |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 3757 | struct bpf_tunnel_key { |
| 3758 | __u32 tunnel_id; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 3759 | union { |
| 3760 | __u32 remote_ipv4; |
| 3761 | __u32 remote_ipv6[4]; |
| 3762 | }; |
| 3763 | __u8 tunnel_tos; |
| 3764 | __u8 tunnel_ttl; |
Daniel Borkmann | 1fbc2e0 | 2018-06-02 23:06:37 +0200 | [diff] [blame] | 3765 | __u16 tunnel_ext; /* Padding, future use. */ |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 3766 | __u32 tunnel_label; |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 3767 | }; |
| 3768 | |
Eyal Birger | 12bed76 | 2018-04-24 17:50:29 +0300 | [diff] [blame] | 3769 | /* user accessible mirror of in-kernel xfrm_state. |
| 3770 | * new fields can only be added to the end of this structure |
| 3771 | */ |
| 3772 | struct bpf_xfrm_state { |
| 3773 | __u32 reqid; |
| 3774 | __u32 spi; /* Stored in network byte order */ |
| 3775 | __u16 family; |
Daniel Borkmann | 1fbc2e0 | 2018-06-02 23:06:37 +0200 | [diff] [blame] | 3776 | __u16 ext; /* Padding, future use. */ |
Eyal Birger | 12bed76 | 2018-04-24 17:50:29 +0300 | [diff] [blame] | 3777 | union { |
| 3778 | __u32 remote_ipv4; /* Stored in network byte order */ |
| 3779 | __u32 remote_ipv6[4]; /* Stored in network byte order */ |
| 3780 | }; |
| 3781 | }; |
| 3782 | |
Thomas Graf | 3a0af8f | 2016-11-30 17:10:10 +0100 | [diff] [blame] | 3783 | /* Generic BPF return codes which all BPF program types may support. |
| 3784 | * The values are binary compatible with their TC_ACT_* counter-part to |
| 3785 | * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT |
| 3786 | * programs. |
| 3787 | * |
| 3788 | * XDP is handled seprately, see XDP_*. |
| 3789 | */ |
| 3790 | enum bpf_ret_code { |
| 3791 | BPF_OK = 0, |
| 3792 | /* 1 reserved */ |
| 3793 | BPF_DROP = 2, |
| 3794 | /* 3-6 reserved */ |
| 3795 | BPF_REDIRECT = 7, |
Peter Oskolkov | 3e0bd37 | 2019-02-13 11:53:35 -0800 | [diff] [blame] | 3796 | /* >127 are reserved for prog type specific return codes. |
| 3797 | * |
| 3798 | * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and |
| 3799 | * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been |
| 3800 | * changed and should be routed based on its new L3 header. |
| 3801 | * (This is an L3 redirect, as opposed to L2 redirect |
| 3802 | * represented by BPF_REDIRECT above). |
| 3803 | */ |
| 3804 | BPF_LWT_REROUTE = 128, |
Thomas Graf | 3a0af8f | 2016-11-30 17:10:10 +0100 | [diff] [blame] | 3805 | }; |
| 3806 | |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 3807 | struct bpf_sock { |
| 3808 | __u32 bound_dev_if; |
David Ahern | aa4c103 | 2016-12-01 08:48:06 -0800 | [diff] [blame] | 3809 | __u32 family; |
| 3810 | __u32 type; |
| 3811 | __u32 protocol; |
David Ahern | 482dca9 | 2017-08-31 15:05:44 -0700 | [diff] [blame] | 3812 | __u32 mark; |
| 3813 | __u32 priority; |
Martin KaFai Lau | aa65d69 | 2019-02-09 23:22:21 -0800 | [diff] [blame] | 3814 | /* IP address also allows 1 and 2 bytes access */ |
| 3815 | __u32 src_ip4; |
| 3816 | __u32 src_ip6[4]; |
| 3817 | __u32 src_port; /* host byte order */ |
| 3818 | __u32 dst_port; /* network byte order */ |
| 3819 | __u32 dst_ip4; |
| 3820 | __u32 dst_ip6[4]; |
| 3821 | __u32 state; |
Amritha Nambiar | c3c16f2 | 2020-05-26 17:34:36 -0700 | [diff] [blame] | 3822 | __s32 rx_queue_mapping; |
David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 3823 | }; |
| 3824 | |
Martin KaFai Lau | 655a51e | 2019-02-09 23:22:24 -0800 | [diff] [blame] | 3825 | struct bpf_tcp_sock { |
| 3826 | __u32 snd_cwnd; /* Sending congestion window */ |
| 3827 | __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ |
| 3828 | __u32 rtt_min; |
| 3829 | __u32 snd_ssthresh; /* Slow start size threshold */ |
| 3830 | __u32 rcv_nxt; /* What we want to receive next */ |
| 3831 | __u32 snd_nxt; /* Next sequence we send */ |
| 3832 | __u32 snd_una; /* First byte we want an ack for */ |
| 3833 | __u32 mss_cache; /* Cached effective mss, not including SACKS */ |
| 3834 | __u32 ecn_flags; /* ECN status bits. */ |
| 3835 | __u32 rate_delivered; /* saved rate sample: packets delivered */ |
| 3836 | __u32 rate_interval_us; /* saved rate sample: time elapsed */ |
| 3837 | __u32 packets_out; /* Packets which are "in flight" */ |
| 3838 | __u32 retrans_out; /* Retransmitted packets out */ |
| 3839 | __u32 total_retrans; /* Total retransmits for entire connection */ |
| 3840 | __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn |
| 3841 | * total number of segments in. |
| 3842 | */ |
| 3843 | __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn |
| 3844 | * total number of data segments in. |
| 3845 | */ |
| 3846 | __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut |
| 3847 | * The total number of segments sent. |
| 3848 | */ |
| 3849 | __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut |
| 3850 | * total number of data segments sent. |
| 3851 | */ |
| 3852 | __u32 lost_out; /* Lost packets */ |
| 3853 | __u32 sacked_out; /* SACK'd packets */ |
| 3854 | __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived |
| 3855 | * sum(delta(rcv_nxt)), or how many bytes |
| 3856 | * were acked. |
| 3857 | */ |
| 3858 | __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked |
| 3859 | * sum(delta(snd_una)), or how many bytes |
| 3860 | * were acked. |
| 3861 | */ |
Stanislav Fomichev | 0357746 | 2019-07-02 09:13:58 -0700 | [diff] [blame] | 3862 | __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups |
| 3863 | * total number of DSACK blocks received |
| 3864 | */ |
| 3865 | __u32 delivered; /* Total data packets delivered incl. rexmits */ |
| 3866 | __u32 delivered_ce; /* Like the above but only ECE marked packets */ |
Stanislav Fomichev | c2cb5e8 | 2019-07-02 09:13:59 -0700 | [diff] [blame] | 3867 | __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ |
Martin KaFai Lau | 655a51e | 2019-02-09 23:22:24 -0800 | [diff] [blame] | 3868 | }; |
| 3869 | |
Joe Stringer | 6acc9b4 | 2018-10-02 13:35:36 -0700 | [diff] [blame] | 3870 | struct bpf_sock_tuple { |
| 3871 | union { |
| 3872 | struct { |
| 3873 | __be32 saddr; |
| 3874 | __be32 daddr; |
| 3875 | __be16 sport; |
| 3876 | __be16 dport; |
| 3877 | } ipv4; |
| 3878 | struct { |
| 3879 | __be32 saddr[4]; |
| 3880 | __be32 daddr[4]; |
| 3881 | __be16 sport; |
| 3882 | __be16 dport; |
| 3883 | } ipv6; |
| 3884 | }; |
| 3885 | }; |
| 3886 | |
Jonathan Lemon | fada7fd | 2019-06-06 13:59:40 -0700 | [diff] [blame] | 3887 | struct bpf_xdp_sock { |
| 3888 | __u32 queue_id; |
| 3889 | }; |
| 3890 | |
Martin KaFai Lau | 17bedab | 2016-12-07 15:53:11 -0800 | [diff] [blame] | 3891 | #define XDP_PACKET_HEADROOM 256 |
| 3892 | |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 3893 | /* User return codes for XDP prog type. |
| 3894 | * A valid XDP program must return one of these defined values. All other |
Daniel Borkmann | 9beb8be | 2017-09-09 01:40:35 +0200 | [diff] [blame] | 3895 | * return codes are reserved for future use. Unknown return codes will |
| 3896 | * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 3897 | */ |
| 3898 | enum xdp_action { |
| 3899 | XDP_ABORTED = 0, |
| 3900 | XDP_DROP, |
| 3901 | XDP_PASS, |
Brenden Blanco | 6ce96ca | 2016-07-19 12:16:53 -0700 | [diff] [blame] | 3902 | XDP_TX, |
John Fastabend | 814abfa | 2017-07-17 09:27:07 -0700 | [diff] [blame] | 3903 | XDP_REDIRECT, |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 3904 | }; |
| 3905 | |
| 3906 | /* user accessible metadata for XDP packet hook |
| 3907 | * new fields must be added to the end of this structure |
| 3908 | */ |
| 3909 | struct xdp_md { |
| 3910 | __u32 data; |
| 3911 | __u32 data_end; |
Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 3912 | __u32 data_meta; |
Jesper Dangaard Brouer | daaf24c | 2018-01-11 17:39:09 +0100 | [diff] [blame] | 3913 | /* Below access go through struct xdp_rxq_info */ |
Jesper Dangaard Brouer | 02dd329 | 2018-01-03 11:26:14 +0100 | [diff] [blame] | 3914 | __u32 ingress_ifindex; /* rxq->dev->ifindex */ |
| 3915 | __u32 rx_queue_index; /* rxq->queue_index */ |
David Ahern | 64b5902 | 2020-05-29 16:07:14 -0600 | [diff] [blame] | 3916 | |
| 3917 | __u32 egress_ifindex; /* txq->dev->ifindex */ |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 3918 | }; |
| 3919 | |
Jesper Dangaard Brouer | 281920b | 2020-06-09 15:31:46 +0200 | [diff] [blame] | 3920 | /* DEVMAP map-value layout |
| 3921 | * |
| 3922 | * The struct data-layout of map-value is a configuration interface. |
| 3923 | * New members can only be added to the end of this structure. |
| 3924 | */ |
| 3925 | struct bpf_devmap_val { |
| 3926 | __u32 ifindex; /* device index */ |
| 3927 | union { |
| 3928 | int fd; /* prog fd on map write */ |
| 3929 | __u32 id; /* prog id on map read */ |
| 3930 | } bpf_prog; |
| 3931 | }; |
| 3932 | |
Lorenzo Bianconi | 644bfe5 | 2020-07-14 15:56:37 +0200 | [diff] [blame] | 3933 | /* CPUMAP map-value layout |
| 3934 | * |
| 3935 | * The struct data-layout of map-value is a configuration interface. |
| 3936 | * New members can only be added to the end of this structure. |
| 3937 | */ |
| 3938 | struct bpf_cpumap_val { |
| 3939 | __u32 qsize; /* queue size to remote target CPU */ |
Lorenzo Bianconi | 9216477 | 2020-07-14 15:56:38 +0200 | [diff] [blame] | 3940 | union { |
| 3941 | int fd; /* prog fd on map write */ |
| 3942 | __u32 id; /* prog id on map read */ |
| 3943 | } bpf_prog; |
Lorenzo Bianconi | 644bfe5 | 2020-07-14 15:56:37 +0200 | [diff] [blame] | 3944 | }; |
| 3945 | |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 3946 | enum sk_action { |
John Fastabend | bfa64075 | 2017-10-27 09:45:53 -0700 | [diff] [blame] | 3947 | SK_DROP = 0, |
| 3948 | SK_PASS, |
John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 3949 | }; |
| 3950 | |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 3951 | /* user accessible metadata for SK_MSG packet hook, new fields must |
| 3952 | * be added to the end of this structure |
| 3953 | */ |
| 3954 | struct sk_msg_md { |
Daniel Borkmann | b7df9ada | 2018-12-01 01:18:53 +0100 | [diff] [blame] | 3955 | __bpf_md_ptr(void *, data); |
| 3956 | __bpf_md_ptr(void *, data_end); |
John Fastabend | 303def3 | 2018-05-17 14:16:58 -0700 | [diff] [blame] | 3957 | |
| 3958 | __u32 family; |
| 3959 | __u32 remote_ip4; /* Stored in network byte order */ |
| 3960 | __u32 local_ip4; /* Stored in network byte order */ |
| 3961 | __u32 remote_ip6[4]; /* Stored in network byte order */ |
| 3962 | __u32 local_ip6[4]; /* Stored in network byte order */ |
| 3963 | __u32 remote_port; /* Stored in network byte order */ |
| 3964 | __u32 local_port; /* stored in host byte order */ |
John Fastabend | 3bdbd02 | 2018-12-16 15:47:04 -0800 | [diff] [blame] | 3965 | __u32 size; /* Total size of sk_msg */ |
John Fastabend | 13d70f5 | 2020-05-24 09:51:15 -0700 | [diff] [blame] | 3966 | |
| 3967 | __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ |
John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 3968 | }; |
| 3969 | |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 3970 | struct sk_reuseport_md { |
| 3971 | /* |
| 3972 | * Start of directly accessible data. It begins from |
| 3973 | * the tcp/udp header. |
| 3974 | */ |
Daniel Borkmann | b7df9ada | 2018-12-01 01:18:53 +0100 | [diff] [blame] | 3975 | __bpf_md_ptr(void *, data); |
| 3976 | /* End of directly accessible data */ |
| 3977 | __bpf_md_ptr(void *, data_end); |
Martin KaFai Lau | 2dbb9b9 | 2018-08-08 01:01:25 -0700 | [diff] [blame] | 3978 | /* |
| 3979 | * Total length of packet (starting from the tcp/udp header). |
| 3980 | * Note that the directly accessible bytes (data_end - data) |
| 3981 | * could be less than this "len". Those bytes could be |
| 3982 | * indirectly read by a helper "bpf_skb_load_bytes()". |
| 3983 | */ |
| 3984 | __u32 len; |
| 3985 | /* |
| 3986 | * Eth protocol in the mac header (network byte order). e.g. |
| 3987 | * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) |
| 3988 | */ |
| 3989 | __u32 eth_protocol; |
| 3990 | __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ |
| 3991 | __u32 bind_inany; /* Is sock bound to an INANY address? */ |
| 3992 | __u32 hash; /* A hash of the packet 4 tuples */ |
| 3993 | }; |
| 3994 | |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 3995 | #define BPF_TAG_SIZE 8 |
| 3996 | |
| 3997 | struct bpf_prog_info { |
| 3998 | __u32 type; |
| 3999 | __u32 id; |
| 4000 | __u8 tag[BPF_TAG_SIZE]; |
| 4001 | __u32 jited_prog_len; |
| 4002 | __u32 xlated_prog_len; |
| 4003 | __aligned_u64 jited_prog_insns; |
| 4004 | __aligned_u64 xlated_prog_insns; |
Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 4005 | __u64 load_time; /* ns since boottime */ |
| 4006 | __u32 created_by_uid; |
| 4007 | __u32 nr_map_ids; |
| 4008 | __aligned_u64 map_ids; |
Martin KaFai Lau | 067cae4 | 2017-10-05 21:52:12 -0700 | [diff] [blame] | 4009 | char name[BPF_OBJ_NAME_LEN]; |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 4010 | __u32 ifindex; |
Jiri Olsa | b85fab0 | 2018-04-25 19:41:06 +0200 | [diff] [blame] | 4011 | __u32 gpl_compatible:1; |
Baruch Siach | 0472301 | 2019-06-28 07:08:45 +0300 | [diff] [blame] | 4012 | __u32 :31; /* alignment pad */ |
Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 4013 | __u64 netns_dev; |
| 4014 | __u64 netns_ino; |
Sandipan Das | dbecd73 | 2018-05-24 12:26:48 +0530 | [diff] [blame] | 4015 | __u32 nr_jited_ksyms; |
Sandipan Das | 815581c | 2018-05-24 12:26:52 +0530 | [diff] [blame] | 4016 | __u32 nr_jited_func_lens; |
Sandipan Das | dbecd73 | 2018-05-24 12:26:48 +0530 | [diff] [blame] | 4017 | __aligned_u64 jited_ksyms; |
Sandipan Das | 815581c | 2018-05-24 12:26:52 +0530 | [diff] [blame] | 4018 | __aligned_u64 jited_func_lens; |
Yonghong Song | 838e969 | 2018-11-19 15:29:11 -0800 | [diff] [blame] | 4019 | __u32 btf_id; |
| 4020 | __u32 func_info_rec_size; |
| 4021 | __aligned_u64 func_info; |
Yonghong Song | 11d8b82 | 2018-12-10 14:14:08 -0800 | [diff] [blame] | 4022 | __u32 nr_func_info; |
| 4023 | __u32 nr_line_info; |
Martin KaFai Lau | c454a46 | 2018-12-07 16:42:25 -0800 | [diff] [blame] | 4024 | __aligned_u64 line_info; |
| 4025 | __aligned_u64 jited_line_info; |
Yonghong Song | 11d8b82 | 2018-12-10 14:14:08 -0800 | [diff] [blame] | 4026 | __u32 nr_jited_line_info; |
Martin KaFai Lau | c454a46 | 2018-12-07 16:42:25 -0800 | [diff] [blame] | 4027 | __u32 line_info_rec_size; |
| 4028 | __u32 jited_line_info_rec_size; |
Song Liu | c872bdb | 2018-12-12 09:37:46 -0800 | [diff] [blame] | 4029 | __u32 nr_prog_tags; |
| 4030 | __aligned_u64 prog_tags; |
Alexei Starovoitov | 5f8f8b9 | 2019-02-25 14:28:40 -0800 | [diff] [blame] | 4031 | __u64 run_time_ns; |
| 4032 | __u64 run_cnt; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 4033 | } __attribute__((aligned(8))); |
| 4034 | |
| 4035 | struct bpf_map_info { |
| 4036 | __u32 type; |
| 4037 | __u32 id; |
| 4038 | __u32 key_size; |
| 4039 | __u32 value_size; |
| 4040 | __u32 max_entries; |
| 4041 | __u32 map_flags; |
Martin KaFai Lau | 067cae4 | 2017-10-05 21:52:12 -0700 | [diff] [blame] | 4042 | char name[BPF_OBJ_NAME_LEN]; |
Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 4043 | __u32 ifindex; |
Martin KaFai Lau | 85d33df | 2020-01-08 16:35:05 -0800 | [diff] [blame] | 4044 | __u32 btf_vmlinux_value_type_id; |
Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 4045 | __u64 netns_dev; |
| 4046 | __u64 netns_ino; |
Martin KaFai Lau | 78958fc | 2018-05-04 14:49:51 -0700 | [diff] [blame] | 4047 | __u32 btf_id; |
Martin KaFai Lau | 9b2cf32 | 2018-05-22 14:57:21 -0700 | [diff] [blame] | 4048 | __u32 btf_key_type_id; |
| 4049 | __u32 btf_value_type_id; |
Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 4050 | } __attribute__((aligned(8))); |
| 4051 | |
Martin KaFai Lau | 62dab84 | 2018-05-04 14:49:52 -0700 | [diff] [blame] | 4052 | struct bpf_btf_info { |
| 4053 | __aligned_u64 btf; |
| 4054 | __u32 btf_size; |
| 4055 | __u32 id; |
| 4056 | } __attribute__((aligned(8))); |
| 4057 | |
Andrii Nakryiko | f2e10bf | 2020-04-28 17:16:08 -0700 | [diff] [blame] | 4058 | struct bpf_link_info { |
| 4059 | __u32 type; |
| 4060 | __u32 id; |
| 4061 | __u32 prog_id; |
| 4062 | union { |
| 4063 | struct { |
| 4064 | __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ |
| 4065 | __u32 tp_name_len; /* in/out: tp_name buffer len */ |
| 4066 | } raw_tracepoint; |
| 4067 | struct { |
| 4068 | __u32 attach_type; |
| 4069 | } tracing; |
| 4070 | struct { |
| 4071 | __u64 cgroup_id; |
| 4072 | __u32 attach_type; |
| 4073 | } cgroup; |
Jakub Sitnicki | 7f045a4 | 2020-05-31 10:28:38 +0200 | [diff] [blame] | 4074 | struct { |
| 4075 | __u32 netns_ino; |
| 4076 | __u32 attach_type; |
| 4077 | } netns; |
Andrii Nakryiko | c1931c9 | 2020-07-21 23:45:59 -0700 | [diff] [blame] | 4078 | struct { |
| 4079 | __u32 ifindex; |
| 4080 | } xdp; |
Andrii Nakryiko | f2e10bf | 2020-04-28 17:16:08 -0700 | [diff] [blame] | 4081 | }; |
| 4082 | } __attribute__((aligned(8))); |
| 4083 | |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 4084 | /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed |
| 4085 | * by user and intended to be used by socket (e.g. to bind to, depends on |
Randy Dunlap | bfdfa51 | 2020-07-15 18:29:11 -0700 | [diff] [blame] | 4086 | * attach type). |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 4087 | */ |
| 4088 | struct bpf_sock_addr { |
| 4089 | __u32 user_family; /* Allows 4-byte read, but no write. */ |
| 4090 | __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. |
| 4091 | * Stored in network byte order. |
| 4092 | */ |
Stanislav Fomichev | d4ecfeb | 2019-07-15 09:39:53 -0700 | [diff] [blame] | 4093 | __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 4094 | * Stored in network byte order. |
| 4095 | */ |
Andrey Ignatov | 7aebfa1 | 2020-05-13 18:50:27 -0700 | [diff] [blame] | 4096 | __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 4097 | * Stored in network byte order |
| 4098 | */ |
| 4099 | __u32 family; /* Allows 4-byte read, but no write */ |
| 4100 | __u32 type; /* Allows 4-byte read, but no write */ |
| 4101 | __u32 protocol; /* Allows 4-byte read, but no write */ |
Stanislav Fomichev | 600c70b | 2019-07-01 10:38:39 -0700 | [diff] [blame] | 4102 | __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 4103 | * Stored in network byte order. |
| 4104 | */ |
Stanislav Fomichev | d4ecfeb | 2019-07-15 09:39:53 -0700 | [diff] [blame] | 4105 | __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. |
Andrey Ignatov | 1cedee1 | 2018-05-25 08:55:23 -0700 | [diff] [blame] | 4106 | * Stored in network byte order. |
| 4107 | */ |
Stanislav Fomichev | fb85c4a | 2019-06-12 10:30:37 -0700 | [diff] [blame] | 4108 | __bpf_md_ptr(struct bpf_sock *, sk); |
Andrey Ignatov | 4fbac77 | 2018-03-30 15:08:02 -0700 | [diff] [blame] | 4109 | }; |
| 4110 | |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 4111 | /* User bpf_sock_ops struct to access socket values and specify request ops |
| 4112 | * and their replies. |
| 4113 | * Some of this fields are in network (bigendian) byte order and may need |
| 4114 | * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). |
| 4115 | * New fields can only be added at the end of this structure |
| 4116 | */ |
| 4117 | struct bpf_sock_ops { |
| 4118 | __u32 op; |
| 4119 | union { |
Lawrence Brakmo | de525be | 2018-01-25 16:14:09 -0800 | [diff] [blame] | 4120 | __u32 args[4]; /* Optionally passed to bpf program */ |
| 4121 | __u32 reply; /* Returned by bpf program */ |
| 4122 | __u32 replylong[4]; /* Optionally returned by bpf prog */ |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 4123 | }; |
| 4124 | __u32 family; |
| 4125 | __u32 remote_ip4; /* Stored in network byte order */ |
| 4126 | __u32 local_ip4; /* Stored in network byte order */ |
| 4127 | __u32 remote_ip6[4]; /* Stored in network byte order */ |
| 4128 | __u32 local_ip6[4]; /* Stored in network byte order */ |
| 4129 | __u32 remote_port; /* Stored in network byte order */ |
| 4130 | __u32 local_port; /* stored in host byte order */ |
Lawrence Brakmo | f19397a | 2017-12-01 10:15:04 -0800 | [diff] [blame] | 4131 | __u32 is_fullsock; /* Some TCP fields are only valid if |
| 4132 | * there is a full socket. If not, the |
| 4133 | * fields read as zero. |
| 4134 | */ |
| 4135 | __u32 snd_cwnd; |
| 4136 | __u32 srtt_us; /* Averaged RTT << 3 in usecs */ |
Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 4137 | __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ |
Lawrence Brakmo | 44f0e43 | 2018-01-25 16:14:12 -0800 | [diff] [blame] | 4138 | __u32 state; |
| 4139 | __u32 rtt_min; |
| 4140 | __u32 snd_ssthresh; |
| 4141 | __u32 rcv_nxt; |
| 4142 | __u32 snd_nxt; |
| 4143 | __u32 snd_una; |
| 4144 | __u32 mss_cache; |
| 4145 | __u32 ecn_flags; |
| 4146 | __u32 rate_delivered; |
| 4147 | __u32 rate_interval_us; |
| 4148 | __u32 packets_out; |
| 4149 | __u32 retrans_out; |
| 4150 | __u32 total_retrans; |
| 4151 | __u32 segs_in; |
| 4152 | __u32 data_segs_in; |
| 4153 | __u32 segs_out; |
| 4154 | __u32 data_segs_out; |
| 4155 | __u32 lost_out; |
| 4156 | __u32 sacked_out; |
| 4157 | __u32 sk_txhash; |
| 4158 | __u64 bytes_received; |
| 4159 | __u64 bytes_acked; |
Stanislav Fomichev | 1314ef5 | 2019-06-12 10:30:38 -0700 | [diff] [blame] | 4160 | __bpf_md_ptr(struct bpf_sock *, sk); |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 4161 | }; |
| 4162 | |
Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 4163 | /* Definitions for bpf_sock_ops_cb_flags */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 4164 | enum { |
| 4165 | BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), |
| 4166 | BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), |
| 4167 | BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), |
| 4168 | BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), |
| 4169 | /* Mask of all currently supported cb flags */ |
| 4170 | BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF, |
| 4171 | }; |
Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 4172 | |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 4173 | /* List of known BPF sock_ops operators. |
| 4174 | * New entries can only be added at the end |
| 4175 | */ |
| 4176 | enum { |
| 4177 | BPF_SOCK_OPS_VOID, |
Lawrence Brakmo | 8550f32 | 2017-06-30 20:02:42 -0700 | [diff] [blame] | 4178 | BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or |
| 4179 | * -1 if default value should be used |
| 4180 | */ |
Lawrence Brakmo | 13d3b1e | 2017-06-30 20:02:44 -0700 | [diff] [blame] | 4181 | BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized |
| 4182 | * window (in packets) or -1 if default |
| 4183 | * value should be used |
| 4184 | */ |
Lawrence Brakmo | 9872a4b | 2017-06-30 20:02:47 -0700 | [diff] [blame] | 4185 | BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an |
| 4186 | * active connection is initialized |
| 4187 | */ |
| 4188 | BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an |
| 4189 | * active connection is |
| 4190 | * established |
| 4191 | */ |
| 4192 | BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a |
| 4193 | * passive connection is |
| 4194 | * established |
| 4195 | */ |
Lawrence Brakmo | 91b5b21 | 2017-06-30 20:02:49 -0700 | [diff] [blame] | 4196 | BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control |
| 4197 | * needs ECN |
| 4198 | */ |
Lawrence Brakmo | e6546ef | 2017-10-20 11:05:39 -0700 | [diff] [blame] | 4199 | BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is |
| 4200 | * based on the path and may be |
| 4201 | * dependent on the congestion control |
| 4202 | * algorithm. In general it indicates |
| 4203 | * a congestion threshold. RTTs above |
| 4204 | * this indicate congestion |
| 4205 | */ |
Lawrence Brakmo | f89013f | 2018-01-25 16:14:11 -0800 | [diff] [blame] | 4206 | BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. |
| 4207 | * Arg1: value of icsk_retransmits |
| 4208 | * Arg2: value of icsk_rto |
| 4209 | * Arg3: whether RTO has expired |
| 4210 | */ |
Lawrence Brakmo | a31ad29 | 2018-01-25 16:14:14 -0800 | [diff] [blame] | 4211 | BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. |
| 4212 | * Arg1: sequence number of 1st byte |
| 4213 | * Arg2: # segments |
| 4214 | * Arg3: return value of |
| 4215 | * tcp_transmit_skb (0 => success) |
| 4216 | */ |
Lawrence Brakmo | d448749 | 2018-01-25 16:14:15 -0800 | [diff] [blame] | 4217 | BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. |
| 4218 | * Arg1: old_state |
| 4219 | * Arg2: new_state |
| 4220 | */ |
Andrey Ignatov | f333ee0 | 2018-07-11 17:33:32 -0700 | [diff] [blame] | 4221 | BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after |
| 4222 | * socket transition to LISTEN state. |
| 4223 | */ |
Stanislav Fomichev | 23729ff | 2019-07-02 09:13:56 -0700 | [diff] [blame] | 4224 | BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. |
| 4225 | */ |
Lawrence Brakmo | d448749 | 2018-01-25 16:14:15 -0800 | [diff] [blame] | 4226 | }; |
| 4227 | |
| 4228 | /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect |
| 4229 | * changes between the TCP and BPF versions. Ideally this should never happen. |
| 4230 | * If it does, we need to add code to convert them before calling |
| 4231 | * the BPF sock_ops function. |
| 4232 | */ |
| 4233 | enum { |
| 4234 | BPF_TCP_ESTABLISHED = 1, |
| 4235 | BPF_TCP_SYN_SENT, |
| 4236 | BPF_TCP_SYN_RECV, |
| 4237 | BPF_TCP_FIN_WAIT1, |
| 4238 | BPF_TCP_FIN_WAIT2, |
| 4239 | BPF_TCP_TIME_WAIT, |
| 4240 | BPF_TCP_CLOSE, |
| 4241 | BPF_TCP_CLOSE_WAIT, |
| 4242 | BPF_TCP_LAST_ACK, |
| 4243 | BPF_TCP_LISTEN, |
| 4244 | BPF_TCP_CLOSING, /* Now a valid state */ |
| 4245 | BPF_TCP_NEW_SYN_RECV, |
| 4246 | |
| 4247 | BPF_TCP_MAX_STATES /* Leave at the end! */ |
Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 4248 | }; |
| 4249 | |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 4250 | enum { |
| 4251 | TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ |
| 4252 | TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ |
| 4253 | }; |
Lawrence Brakmo | fc74781 | 2017-06-30 20:02:51 -0700 | [diff] [blame] | 4254 | |
Yonghong Song | 908432c | 2017-10-05 09:19:20 -0700 | [diff] [blame] | 4255 | struct bpf_perf_event_value { |
| 4256 | __u64 counter; |
| 4257 | __u64 enabled; |
| 4258 | __u64 running; |
| 4259 | }; |
| 4260 | |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 4261 | enum { |
| 4262 | BPF_DEVCG_ACC_MKNOD = (1ULL << 0), |
| 4263 | BPF_DEVCG_ACC_READ = (1ULL << 1), |
| 4264 | BPF_DEVCG_ACC_WRITE = (1ULL << 2), |
| 4265 | }; |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 4266 | |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 4267 | enum { |
| 4268 | BPF_DEVCG_DEV_BLOCK = (1ULL << 0), |
| 4269 | BPF_DEVCG_DEV_CHAR = (1ULL << 1), |
| 4270 | }; |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 4271 | |
| 4272 | struct bpf_cgroup_dev_ctx { |
Yonghong Song | 06ef0cc | 2017-12-18 10:13:44 -0800 | [diff] [blame] | 4273 | /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ |
| 4274 | __u32 access_type; |
Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 4275 | __u32 major; |
| 4276 | __u32 minor; |
| 4277 | }; |
| 4278 | |
Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 4279 | struct bpf_raw_tracepoint_args { |
| 4280 | __u64 args[0]; |
| 4281 | }; |
| 4282 | |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4283 | /* DIRECT: Skip the FIB rules and go to FIB table associated with device |
| 4284 | * OUTPUT: Do lookup from egress perspective; default is ingress |
| 4285 | */ |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 4286 | enum { |
| 4287 | BPF_FIB_LOOKUP_DIRECT = (1U << 0), |
| 4288 | BPF_FIB_LOOKUP_OUTPUT = (1U << 1), |
| 4289 | }; |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4290 | |
David Ahern | 4c79579 | 2018-06-26 16:21:18 -0700 | [diff] [blame] | 4291 | enum { |
| 4292 | BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ |
| 4293 | BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ |
| 4294 | BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ |
| 4295 | BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ |
| 4296 | BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ |
| 4297 | BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ |
| 4298 | BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ |
| 4299 | BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ |
| 4300 | BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ |
| 4301 | }; |
| 4302 | |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4303 | struct bpf_fib_lookup { |
David Ahern | fa898d7 | 2018-05-29 10:58:07 -0700 | [diff] [blame] | 4304 | /* input: network family for lookup (AF_INET, AF_INET6) |
| 4305 | * output: network family of egress nexthop |
| 4306 | */ |
| 4307 | __u8 family; |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4308 | |
| 4309 | /* set if lookup is to consider L4 data - e.g., FIB rules */ |
| 4310 | __u8 l4_protocol; |
| 4311 | __be16 sport; |
| 4312 | __be16 dport; |
| 4313 | |
| 4314 | /* total length of packet from network header - used for MTU check */ |
| 4315 | __u16 tot_len; |
David Ahern | 4c79579 | 2018-06-26 16:21:18 -0700 | [diff] [blame] | 4316 | |
| 4317 | /* input: L3 device index for lookup |
| 4318 | * output: device index from FIB lookup |
| 4319 | */ |
| 4320 | __u32 ifindex; |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4321 | |
| 4322 | union { |
| 4323 | /* inputs to lookup */ |
| 4324 | __u8 tos; /* AF_INET */ |
David Ahern | bd3a08a | 2018-06-03 08:15:19 -0700 | [diff] [blame] | 4325 | __be32 flowinfo; /* AF_INET6, flow_label + priority */ |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4326 | |
David Ahern | fa898d7 | 2018-05-29 10:58:07 -0700 | [diff] [blame] | 4327 | /* output: metric of fib result (IPv4/IPv6 only) */ |
| 4328 | __u32 rt_metric; |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4329 | }; |
| 4330 | |
| 4331 | union { |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4332 | __be32 ipv4_src; |
| 4333 | __u32 ipv6_src[4]; /* in6_addr; network order */ |
| 4334 | }; |
| 4335 | |
David Ahern | fa898d7 | 2018-05-29 10:58:07 -0700 | [diff] [blame] | 4336 | /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in |
| 4337 | * network header. output: bpf_fib_lookup sets to gateway address |
| 4338 | * if FIB lookup returns gateway route |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4339 | */ |
| 4340 | union { |
David Ahern | 87f5fc7 | 2018-05-09 20:34:26 -0700 | [diff] [blame] | 4341 | __be32 ipv4_dst; |
| 4342 | __u32 ipv6_dst[4]; /* in6_addr; network order */ |
| 4343 | }; |
| 4344 | |
| 4345 | /* output */ |
| 4346 | __be16 h_vlan_proto; |
| 4347 | __be16 h_vlan_TCI; |
| 4348 | __u8 smac[6]; /* ETH_ALEN */ |
| 4349 | __u8 dmac[6]; /* ETH_ALEN */ |
| 4350 | }; |
| 4351 | |
Yonghong Song | 41bdc4b | 2018-05-24 11:21:09 -0700 | [diff] [blame] | 4352 | enum bpf_task_fd_type { |
| 4353 | BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ |
| 4354 | BPF_FD_TYPE_TRACEPOINT, /* tp name */ |
| 4355 | BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ |
| 4356 | BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ |
| 4357 | BPF_FD_TYPE_UPROBE, /* filename + offset */ |
| 4358 | BPF_FD_TYPE_URETPROBE, /* filename + offset */ |
| 4359 | }; |
| 4360 | |
Andrii Nakryiko | 1aae4bd | 2020-03-02 16:32:31 -0800 | [diff] [blame] | 4361 | enum { |
| 4362 | BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), |
| 4363 | BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), |
| 4364 | BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), |
| 4365 | }; |
Stanislav Fomichev | 086f956 | 2019-07-25 15:52:25 -0700 | [diff] [blame] | 4366 | |
Petar Penkov | d58e468 | 2018-09-14 07:46:18 -0700 | [diff] [blame] | 4367 | struct bpf_flow_keys { |
| 4368 | __u16 nhoff; |
| 4369 | __u16 thoff; |
| 4370 | __u16 addr_proto; /* ETH_P_* of valid addrs */ |
| 4371 | __u8 is_frag; |
| 4372 | __u8 is_first_frag; |
| 4373 | __u8 is_encap; |
| 4374 | __u8 ip_proto; |
| 4375 | __be16 n_proto; |
| 4376 | __be16 sport; |
| 4377 | __be16 dport; |
| 4378 | union { |
| 4379 | struct { |
| 4380 | __be32 ipv4_src; |
| 4381 | __be32 ipv4_dst; |
| 4382 | }; |
| 4383 | struct { |
| 4384 | __u32 ipv6_src[4]; /* in6_addr; network order */ |
| 4385 | __u32 ipv6_dst[4]; /* in6_addr; network order */ |
| 4386 | }; |
| 4387 | }; |
Stanislav Fomichev | 086f956 | 2019-07-25 15:52:25 -0700 | [diff] [blame] | 4388 | __u32 flags; |
Stanislav Fomichev | 71c99e3 | 2019-07-25 15:52:30 -0700 | [diff] [blame] | 4389 | __be32 flow_label; |
Petar Penkov | d58e468 | 2018-09-14 07:46:18 -0700 | [diff] [blame] | 4390 | }; |
| 4391 | |
Yonghong Song | 838e969 | 2018-11-19 15:29:11 -0800 | [diff] [blame] | 4392 | struct bpf_func_info { |
Martin KaFai Lau | d30d42e | 2018-12-05 17:35:44 -0800 | [diff] [blame] | 4393 | __u32 insn_off; |
Yonghong Song | 838e969 | 2018-11-19 15:29:11 -0800 | [diff] [blame] | 4394 | __u32 type_id; |
| 4395 | }; |
| 4396 | |
Martin KaFai Lau | c454a46 | 2018-12-07 16:42:25 -0800 | [diff] [blame] | 4397 | #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) |
| 4398 | #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) |
| 4399 | |
| 4400 | struct bpf_line_info { |
| 4401 | __u32 insn_off; |
| 4402 | __u32 file_name_off; |
| 4403 | __u32 line_off; |
| 4404 | __u32 line_col; |
| 4405 | }; |
| 4406 | |
Alexei Starovoitov | d83525c | 2019-01-31 15:40:04 -0800 | [diff] [blame] | 4407 | struct bpf_spin_lock { |
| 4408 | __u32 val; |
| 4409 | }; |
Andrey Ignatov | 7b146ce | 2019-02-27 12:59:24 -0800 | [diff] [blame] | 4410 | |
| 4411 | struct bpf_sysctl { |
| 4412 | __u32 write; /* Sysctl is being read (= 0) or written (= 1). |
| 4413 | * Allows 1,2,4-byte read, but no write. |
| 4414 | */ |
Andrey Ignatov | e1550bf | 2019-03-07 18:50:52 -0800 | [diff] [blame] | 4415 | __u32 file_pos; /* Sysctl file position to read from, write to. |
| 4416 | * Allows 1,2,4-byte read an 4-byte write. |
| 4417 | */ |
Andrey Ignatov | 7b146ce | 2019-02-27 12:59:24 -0800 | [diff] [blame] | 4418 | }; |
| 4419 | |
Stanislav Fomichev | 0d01da6 | 2019-06-27 13:38:47 -0700 | [diff] [blame] | 4420 | struct bpf_sockopt { |
| 4421 | __bpf_md_ptr(struct bpf_sock *, sk); |
| 4422 | __bpf_md_ptr(void *, optval); |
| 4423 | __bpf_md_ptr(void *, optval_end); |
| 4424 | |
| 4425 | __s32 level; |
| 4426 | __s32 optname; |
| 4427 | __s32 optlen; |
| 4428 | __s32 retval; |
| 4429 | }; |
| 4430 | |
Carlos Neira | b4490c5 | 2020-03-04 17:41:56 -0300 | [diff] [blame] | 4431 | struct bpf_pidns_info { |
| 4432 | __u32 pid; |
| 4433 | __u32 tgid; |
| 4434 | }; |
Jakub Sitnicki | e9ddbb7 | 2020-07-17 12:35:23 +0200 | [diff] [blame] | 4435 | |
| 4436 | /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ |
| 4437 | struct bpf_sk_lookup { |
| 4438 | __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ |
| 4439 | |
| 4440 | __u32 family; /* Protocol family (AF_INET, AF_INET6) */ |
| 4441 | __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ |
| 4442 | __u32 remote_ip4; /* Network byte order */ |
| 4443 | __u32 remote_ip6[4]; /* Network byte order */ |
| 4444 | __u32 remote_port; /* Network byte order */ |
| 4445 | __u32 local_ip4; /* Network byte order */ |
| 4446 | __u32 local_ip6[4]; /* Network byte order */ |
| 4447 | __u32 local_port; /* Host byte order */ |
| 4448 | }; |
| 4449 | |
Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 4450 | #endif /* _UAPI__LINUX_BPF_H__ */ |