// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
							int idx);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

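/* Illustrative usage sketch, not part of the original file: a caller can
 * install its own logging callback via libbpf_set_print() and restore the
 * previous one later. The callback name my_print_fn below is hypothetical;
 * it only has to match libbpf_print_fn_t.
 *
 *	static int my_print_fn(enum libbpf_print_level level,
 *			       const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print_fn);
 *	// ... use libbpf ...
 *	libbpf_set_print(old_fn);
 */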
static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

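/* Illustrative sketch, not part of the original file: the warning above
 * points at RLIMIT_MEMLOCK. An application could raise the limit itself
 * before loading BPF objects, roughly:
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	if (setrlimit(RLIMIT_MEMLOCK, &r))
 *		perror("setrlimit(RLIMIT_MEMLOCK)");
 *
 * which is the programmatic equivalent of the 'ulimit -l' hint printed by
 * pr_perm_msg().
 */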
#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
	/* BPF_F_MMAPABLE is supported for arrays */
	__u32 array_mmap:1;
	/* BTF_FUNC_GLOBAL is supported */
	__u32 btf_func_global:1;
};

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= DATA_SEC,
	[LIBBPF_MAP_BSS]	= BSS_SEC,
	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
	[LIBBPF_MAP_KCONFIG]	= KCONFIG_SEC,
};

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_CHAR,
	EXT_BOOL,
	EXT_INT,
	EXT_TRISTATE,
	EXT_CHAR_ARR,
};

struct extern_desc {
	const char *name;
	int sym_idx;
	int btf_id;
	enum extern_type type;
	int sz;
	int align;
	int data_off;
	bool is_signed;
	bool is_weak;
	bool is_set;
};

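/* Illustrative sketch, not part of the original file: extern_desc above
 * describes externs that a BPF program declares against the Kconfig data,
 * for example (names are examples only):
 *
 *	extern int CONFIG_HZ __kconfig;
 *	extern bool CONFIG_BPF_SYSCALL __kconfig;
 *
 * libbpf resolves such CONFIG_* externs from the running kernel's config
 * (or a caller-supplied kconfig string, see bpf_object::kconfig) and places
 * their values into the internal ".kconfig" map.
 */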
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		Elf_Data *st_ops_data;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warn("corrupted section '%s', size: %zu\n",
			section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warn("failed to alloc name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warn("failed to alloc insns for prog under section %s\n",
			section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warn("failed to alloc a new program under section '%s'\n",
			section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warn("failed to get sym name string for prog %s\n",
					prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warn("failed to find sym for prog %s\n",
				prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warn("failed to allocate memory for prog sym %s\n",
				name);
			return -ENOMEM;
		}
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

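/* Worked example (illustrative, not part of the original file): for a uname
 * release string such as "5.4.0-42-generic", sscanf() extracts 5, 4 and 0,
 * and the function returns KERNEL_VERSION(5, 4, 0), i.e.
 * (5 << 16) + (4 << 8) + 0. A release string that doesn't start with
 * "major.minor.patch" yields 0.
 */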
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);
			if (!btf_is_func_proto(mtype) ||
			    !btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog = st_ops->progs[i];
			if (!prog) {
				pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
					 map->name, mname);
				continue;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

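/* Illustrative sketch, not part of the original file: on the BPF program
 * side, a struct_ops map typically comes from a global variable placed in
 * the ".struct_ops" section, e.g. (names are examples only):
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init		= (void *)dctcp_init,
 *		.ssthresh	= (void *)dctcp_ssthresh,
 *		.name		= "bpf_dctcp",
 *	};
 *
 * bpf_object__init_struct_ops_maps() turns each such variable into a
 * BPF_MAP_TYPE_STRUCT_OPS map named after the variable.
 */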
static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
		if (obj->efile.st_ops_data)
			*size = obj->efile.st_ops_data->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
	 * when failure (zclose won't close negative fd)).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

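/* Worked example (illustrative, not part of the original file): for an
 * internal .data map with value_size == 100 and max_entries == 1 on a
 * 4 KiB-page system, bpf_map_mmap_sz() rounds 100 up to 104 (multiple of 8),
 * multiplies by 1, then rounds up to the page size, yielding 4096 bytes
 * to mmap().
 */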
static char *internal_map_name(struct bpf_object *obj,
			       enum libbpf_map_type type)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	const char *sfx = libbpf_type_to_btf_name[type];
	int sfx_len = max((size_t)7, strlen(sfx));
	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
			  strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, libbpf_type_to_btf_name[type]);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->name = internal_map_name(obj, type);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
			     char value)
{
	switch (ext->type) {
	case EXT_BOOL:
		if (value == 'm') {
			pr_warn("extern %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case EXT_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case EXT_CHAR:
		*(char *)ext_val = value;
		break;
	case EXT_UNKNOWN:
	case EXT_INT:
	case EXT_CHAR_ARR:
	default:
		pr_warn("extern %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_ext_value_str(struct extern_desc *ext, char *ext_val,
			     const char *value)
{
	size_t len;

	if (ext->type != EXT_CHAR_ARR) {
		pr_warn("extern %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}
1446
1447 len = strlen(value);
 1448 if (len < 2 || value[len - 1] != '"') {
1449 pr_warn("extern '%s': invalid string config '%s'\n",
1450 ext->name, value);
1451 return -EINVAL;
1452 }
1453
1454 /* strip quotes */
1455 len -= 2;
1456 if (len >= ext->sz) {
1457 pr_warn("extern '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
1458 ext->name, value, len, ext->sz - 1);
1459 len = ext->sz - 1;
1460 }
1461 memcpy(ext_val, value + 1, len);
1462 ext_val[len] = '\0';
1463 ext->is_set = true;
1464 return 0;
1465}
1466
1467static int parse_u64(const char *value, __u64 *res)
1468{
1469 char *value_end;
1470 int err;
1471
1472 errno = 0;
1473 *res = strtoull(value, &value_end, 0);
1474 if (errno) {
1475 err = -errno;
1476 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1477 return err;
1478 }
1479 if (*value_end) {
1480 pr_warn("failed to parse '%s' as integer completely\n", value);
1481 return -EINVAL;
1482 }
1483 return 0;
1484}
1485
1486static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
1487{
1488 int bit_sz = ext->sz * 8;
1489
1490 if (ext->sz == 8)
1491 return true;
1492
1493 /* Validate that value stored in u64 fits in integer of `ext->sz`
1494 * bytes size without any loss of information. If the target integer
1495 * is signed, we rely on the following limits of integer type of
1496 * Y bits and subsequent transformation:
1497 *
1498 * -2^(Y-1) <= X <= 2^(Y-1) - 1
1499 * 0 <= X + 2^(Y-1) <= 2^Y - 1
1500 * 0 <= X + 2^(Y-1) < 2^Y
1501 *
1502 * For unsigned target integer, check that all the (64 - Y) bits are
1503 * zero.
1504 */
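	/* Worked example (illustrative): for a signed 1-byte extern
	 * (bit_sz == 8), v = (__u64)-1 is 0xffffffffffffffff; adding 2^7
	 * wraps it to 0x7f, which is < 2^8, so -1 is accepted, whereas
	 * (__u64)-129 wraps to 0xffffffffffffffff and is rejected.
	 */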
1505 if (ext->is_signed)
1506 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1507 else
1508 return (v >> bit_sz) == 0;
1509}
1510
1511static int set_ext_value_num(struct extern_desc *ext, void *ext_val,
1512 __u64 value)
1513{
1514 if (ext->type != EXT_INT && ext->type != EXT_CHAR) {
1515 pr_warn("extern %s=%llu should be integer\n",
Andrii Nakryiko7745ff92019-12-18 21:21:03 -08001516 ext->name, (unsigned long long)value);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001517 return -EINVAL;
1518 }
1519 if (!is_ext_value_in_range(ext, value)) {
1520 pr_warn("extern %s=%llu value doesn't fit in %d bytes\n",
Andrii Nakryiko7745ff92019-12-18 21:21:03 -08001521 ext->name, (unsigned long long)value, ext->sz);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001522 return -ERANGE;
1523 }
1524 switch (ext->sz) {
1525 case 1: *(__u8 *)ext_val = value; break;
1526 case 2: *(__u16 *)ext_val = value; break;
1527 case 4: *(__u32 *)ext_val = value; break;
1528 case 8: *(__u64 *)ext_val = value; break;
1529 default:
1530 return -EINVAL;
1531 }
1532 ext->is_set = true;
1533 return 0;
1534}
1535
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001536static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1537 char *buf, void *data)
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001538{
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001539 struct extern_desc *ext;
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001540 char *sep, *value;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001541 int len, err = 0;
1542 void *ext_val;
1543 __u64 num;
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001544
1545 if (strncmp(buf, "CONFIG_", 7))
1546 return 0;
1547
1548 sep = strchr(buf, '=');
1549 if (!sep) {
1550 pr_warn("failed to parse '%s': no separator\n", buf);
1551 return -EINVAL;
1552 }
1553
1554 /* Trim ending '\n' */
1555 len = strlen(buf);
1556 if (buf[len - 1] == '\n')
1557 buf[len - 1] = '\0';
1558 /* Split on '=' and ensure that a value is present. */
1559 *sep = '\0';
1560 if (!sep[1]) {
1561 *sep = '=';
1562 pr_warn("failed to parse '%s': no value\n", buf);
1563 return -EINVAL;
1564 }
1565
1566 ext = find_extern_by_name(obj, buf);
1567 if (!ext || ext->is_set)
1568 return 0;
1569
1570 ext_val = data + ext->data_off;
1571 value = sep + 1;
1572
1573 switch (*value) {
1574 case 'y': case 'n': case 'm':
1575 err = set_ext_value_tri(ext, ext_val, *value);
1576 break;
1577 case '"':
1578 err = set_ext_value_str(ext, ext_val, value);
1579 break;
1580 default:
1581 /* assume integer */
1582 err = parse_u64(value, &num);
1583 if (err) {
1584 pr_warn("extern %s=%s should be integer\n",
1585 ext->name, value);
1586 return err;
1587 }
1588 err = set_ext_value_num(ext, ext_val, num);
1589 break;
1590 }
1591 if (err)
1592 return err;
1593 pr_debug("extern %s=%s\n", ext->name, value);
1594 return 0;
1595}
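
/* Example lines this parser consumes (illustrative):
 *
 *     CONFIG_BPF_SYSCALL=y              -> set_ext_value_tri()
 *     CONFIG_DEFAULT_HOSTNAME="(none)"  -> set_ext_value_str()
 *     CONFIG_HZ=250                     -> parse_u64() + set_ext_value_num()
 *
 * Lines not starting with "CONFIG_", and configs with no matching extern,
 * are silently skipped.
 */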
1596
1597static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1598{
1599 char buf[PATH_MAX];
1600 struct utsname uts;
1601 int len, err = 0;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001602 gzFile file;
1603
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001604 uname(&uts);
1605 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1606 if (len < 0)
1607 return -EINVAL;
1608 else if (len >= PATH_MAX)
1609 return -ENAMETOOLONG;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001610
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001611 /* gzopen also accepts uncompressed files. */
1612 file = gzopen(buf, "r");
1613 if (!file)
1614 file = gzopen("/proc/config.gz", "r");
1615
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001616 if (!file) {
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001617 pr_warn("failed to open system Kconfig\n");
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001618 return -ENOENT;
1619 }
1620
1621 while (gzgets(file, buf, sizeof(buf))) {
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001622 err = bpf_object__process_kconfig_line(obj, buf, data);
1623 if (err) {
1624 pr_warn("error parsing system Kconfig line '%s': %d\n",
1625 buf, err);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001626 goto out;
1627 }
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001628 }
1629
1630out:
1631 gzclose(file);
1632 return err;
1633}
1634
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08001635static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1636 const char *config, void *data)
1637{
1638 char buf[PATH_MAX];
1639 int err = 0;
1640 FILE *file;
1641
1642 file = fmemopen((void *)config, strlen(config), "r");
1643 if (!file) {
1644 err = -errno;
1645 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1646 return err;
1647 }
1648
1649 while (fgets(buf, sizeof(buf), file)) {
1650 err = bpf_object__process_kconfig_line(obj, buf, data);
1651 if (err) {
1652 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1653 buf, err);
1654 break;
1655 }
1656 }
1657
1658 fclose(file);
1659 return err;
1660}
1661
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08001662static int bpf_object__init_kconfig_map(struct bpf_object *obj)
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001663{
1664 struct extern_desc *last_ext;
1665 size_t map_sz;
1666 int err;
1667
1668 if (obj->nr_extern == 0)
1669 return 0;
1670
1671 last_ext = &obj->externs[obj->nr_extern - 1];
1672 map_sz = last_ext->data_off + last_ext->sz;
1673
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08001674 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001675 obj->efile.symbols_shndx,
1676 NULL, map_sz);
1677 if (err)
1678 return err;
1679
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08001680 obj->kconfig_map_idx = obj->nr_maps - 1;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08001681
1682 return 0;
1683}
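
/* Illustration: if an object declares, say, a 4-byte CONFIG_HZ and a 1-byte
 * CONFIG_BPF_SYSCALL extern, the .kconfig map created above is a single-entry
 * array whose value spans last_ext->data_off + last_ext->sz bytes, and each
 * extern is later resolved at its data_off within that value blob.
 */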
1684
Andrii Nakryikobf829272019-06-17 12:26:53 -07001685static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1686{
Eric Leblond4708bbd2016-11-15 04:05:47 +00001687 Elf_Data *symbols = obj->efile.symbols;
Andrii Nakryikobf829272019-06-17 12:26:53 -07001688 int i, map_def_sz = 0, nr_maps = 0, nr_syms;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001689 Elf_Data *data = NULL;
Andrii Nakryikobf829272019-06-17 12:26:53 -07001690 Elf_Scn *scn;
1691
1692 if (obj->efile.maps_shndx < 0)
1693 return 0;
Eric Leblond4708bbd2016-11-15 04:05:47 +00001694
Eric Leblond4708bbd2016-11-15 04:05:47 +00001695 if (!symbols)
1696 return -EINVAL;
1697
Andrii Nakryikobf829272019-06-17 12:26:53 -07001698 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
1699 if (scn)
1700 data = elf_getdata(scn, NULL);
1701 if (!scn || !data) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001702 pr_warn("failed to get Elf_Data from map section %d\n",
1703 obj->efile.maps_shndx);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001704 return -EINVAL;
Wang Nan0b3d1ef2015-07-01 02:13:58 +00001705 }
1706
Eric Leblond4708bbd2016-11-15 04:05:47 +00001707 /*
1708 * Count number of maps. Each map has a name.
1709 * Array of maps is not supported: only the first element is
1710 * considered.
1711 *
1712 * TODO: Detect array of map and report error.
1713 */
Andrii Nakryikobf829272019-06-17 12:26:53 -07001714 nr_syms = symbols->d_size / sizeof(GElf_Sym);
1715 for (i = 0; i < nr_syms; i++) {
Eric Leblond4708bbd2016-11-15 04:05:47 +00001716 GElf_Sym sym;
1717
1718 if (!gelf_getsym(symbols, i, &sym))
1719 continue;
1720 if (sym.st_shndx != obj->efile.maps_shndx)
1721 continue;
1722 nr_maps++;
1723 }
Craig Gallekb13c5c12017-10-05 10:41:57 -04001724 /* Assume equally sized map definitions */
Andrii Nakryikobf829272019-06-17 12:26:53 -07001725 pr_debug("maps in %s: %d maps in %zd bytes\n",
1726 obj->path, nr_maps, data->d_size);
Daniel Borkmann4f8827d2019-04-24 00:45:57 +02001727
Andrii Nakryiko98e527af2019-11-06 18:08:55 -08001728 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
Andrii Nakryiko8983b732019-11-20 23:07:42 -08001729 pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n",
Kefeng Wangbe180102019-10-21 13:55:32 +08001730 obj->path, nr_maps, data->d_size);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001731 return -EINVAL;
Craig Gallekb13c5c12017-10-05 10:41:57 -04001732 }
Andrii Nakryiko98e527af2019-11-06 18:08:55 -08001733 map_def_sz = data->d_size / nr_maps;
Craig Gallekb13c5c12017-10-05 10:41:57 -04001734
Andrii Nakryikobf829272019-06-17 12:26:53 -07001735 /* Fill obj->maps using data in "maps" section. */
1736 for (i = 0; i < nr_syms; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00001737 GElf_Sym sym;
Wang Nan561bbcc2015-11-27 08:47:36 +00001738 const char *map_name;
Eric Leblond4708bbd2016-11-15 04:05:47 +00001739 struct bpf_map_def *def;
Andrii Nakryikobf829272019-06-17 12:26:53 -07001740 struct bpf_map *map;
Wang Nan561bbcc2015-11-27 08:47:36 +00001741
1742 if (!gelf_getsym(symbols, i, &sym))
1743 continue;
Wang Nan666810e2016-01-25 09:55:49 +00001744 if (sym.st_shndx != obj->efile.maps_shndx)
Wang Nan561bbcc2015-11-27 08:47:36 +00001745 continue;
1746
Andrii Nakryikobf829272019-06-17 12:26:53 -07001747 map = bpf_object__add_map(obj);
1748 if (IS_ERR(map))
1749 return PTR_ERR(map);
1750
1751 map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
Wang Nan561bbcc2015-11-27 08:47:36 +00001752 sym.st_name);
Andrii Nakryikoc51829b2019-05-29 10:36:06 -07001753 if (!map_name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001754 pr_warn("failed to get map #%d name sym string for obj %s\n",
1755 i, obj->path);
Andrii Nakryikoc51829b2019-05-29 10:36:06 -07001756 return -LIBBPF_ERRNO__FORMAT;
1757 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001758
Andrii Nakryikobf829272019-06-17 12:26:53 -07001759 map->libbpf_type = LIBBPF_MAP_UNSPEC;
Andrii Nakryikodb488142019-06-17 12:26:54 -07001760 map->sec_idx = sym.st_shndx;
1761 map->sec_offset = sym.st_value;
1762 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1763 map_name, map->sec_idx, map->sec_offset);
Craig Gallekb13c5c12017-10-05 10:41:57 -04001764 if (sym.st_value + map_def_sz > data->d_size) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001765 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1766 obj->path, map_name);
Eric Leblond4708bbd2016-11-15 04:05:47 +00001767 return -EINVAL;
Wang Nan561bbcc2015-11-27 08:47:36 +00001768 }
Eric Leblond4708bbd2016-11-15 04:05:47 +00001769
Andrii Nakryikobf829272019-06-17 12:26:53 -07001770 map->name = strdup(map_name);
1771 if (!map->name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001772 pr_warn("failed to alloc map name\n");
Wang Nan973170e2015-12-08 02:25:29 +00001773 return -ENOMEM;
1774 }
Andrii Nakryikobf829272019-06-17 12:26:53 -07001775 pr_debug("map %d is \"%s\"\n", i, map->name);
Eric Leblond4708bbd2016-11-15 04:05:47 +00001776 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
Craig Gallekb13c5c12017-10-05 10:41:57 -04001777 /*
1778 * If the definition of the map in the object file fits in
1779 * bpf_map_def, copy it. Any extra fields in our version
 1780 * of bpf_map_def will default to zero as a result of the
 1781 * zero-initialization done in bpf_object__add_map().
1782 */
1783 if (map_def_sz <= sizeof(struct bpf_map_def)) {
Andrii Nakryikobf829272019-06-17 12:26:53 -07001784 memcpy(&map->def, def, map_def_sz);
Craig Gallekb13c5c12017-10-05 10:41:57 -04001785 } else {
1786 /*
1787 * Here the map structure being read is bigger than what
1788 * we expect, truncate if the excess bits are all zero.
1789 * If they are not zero, reject this map as
1790 * incompatible.
1791 */
1792 char *b;
Andrii Nakryiko8983b732019-11-20 23:07:42 -08001793
Craig Gallekb13c5c12017-10-05 10:41:57 -04001794 for (b = ((char *)def) + sizeof(struct bpf_map_def);
1795 b < ((char *)def) + map_def_sz; b++) {
1796 if (*b != 0) {
Andrii Nakryiko8983b732019-11-20 23:07:42 -08001797 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
Kefeng Wangbe180102019-10-21 13:55:32 +08001798 obj->path, map_name);
John Fastabendc034a172018-10-15 11:19:55 -07001799 if (strict)
1800 return -EINVAL;
Craig Gallekb13c5c12017-10-05 10:41:57 -04001801 }
1802 }
Andrii Nakryikobf829272019-06-17 12:26:53 -07001803 memcpy(&map->def, def, sizeof(struct bpf_map_def));
Craig Gallekb13c5c12017-10-05 10:41:57 -04001804 }
Wang Nan561bbcc2015-11-27 08:47:36 +00001805 }
Andrii Nakryikobf829272019-06-17 12:26:53 -07001806 return 0;
1807}
Eric Leblond4708bbd2016-11-15 04:05:47 +00001808
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07001809static const struct btf_type *
1810skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001811{
1812 const struct btf_type *t = btf__type_by_id(btf, id);
1813
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07001814 if (res_id)
1815 *res_id = id;
1816
1817 while (btf_is_mod(t) || btf_is_typedef(t)) {
1818 if (res_id)
1819 *res_id = t->type;
1820 t = btf__type_by_id(btf, t->type);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001821 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07001822
1823 return t;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001824}
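
/* For instance (illustrative), given "typedef const volatile int cvi_t;",
 * resolving a cvi_t type id walks through the TYPEDEF and the CONST/VOLATILE
 * modifiers and returns the underlying INT type, with *res_id (if non-NULL)
 * set to that INT's own id.
 */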
1825
Martin KaFai Lau590a0082020-01-08 16:35:14 -08001826static const struct btf_type *
1827resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1828{
1829 const struct btf_type *t;
1830
1831 t = skip_mods_and_typedefs(btf, id, NULL);
1832 if (!btf_is_ptr(t))
1833 return NULL;
1834
1835 t = skip_mods_and_typedefs(btf, t->type, res_id);
1836
1837 return btf_is_func_proto(t) ? t : NULL;
1838}
1839
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001840/*
1841 * Fetch integer attribute of BTF map definition. Such attributes are
1842 * represented using a pointer to an array, in which dimensionality of array
1843 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1844 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1845 * type definition, while using only sizeof(void *) space in ELF data section.
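 *
 * For example (illustrative), a BPF-side declaration such as
 *
 *     struct {
 *         int (*type)[BPF_MAP_TYPE_ARRAY];
 *         int (*max_entries)[256];
 *     } my_map SEC(".maps");
 *
 * (typically written via the __uint() convenience macro from bpf_helpers.h)
 * is decoded by this helper as type = BPF_MAP_TYPE_ARRAY and
 * max_entries = 256.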
1846 */
1847static bool get_map_field_int(const char *map_name, const struct btf *btf,
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001848 const struct btf_type *def,
Andrii Nakryiko8983b732019-11-20 23:07:42 -08001849 const struct btf_member *m, __u32 *res)
1850{
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07001851 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001852 const char *name = btf__name_by_offset(btf, m->name_off);
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001853 const struct btf_array *arr_info;
1854 const struct btf_type *arr_t;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001855
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001856 if (!btf_is_ptr(t)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001857 pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
1858 map_name, name, btf_kind(t));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001859 return false;
1860 }
Eric Leblond4708bbd2016-11-15 04:05:47 +00001861
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001862 arr_t = btf__type_by_id(btf, t->type);
1863 if (!arr_t) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001864 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1865 map_name, name, t->type);
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001866 return false;
1867 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001868 if (!btf_is_array(arr_t)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001869 pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
1870 map_name, name, btf_kind(arr_t));
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001871 return false;
1872 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001873 arr_info = btf_array(arr_t);
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001874 *res = arr_info->nelems;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001875 return true;
1876}
Daniel Borkmann8837fe52019-04-24 00:45:56 +02001877
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001878static int build_map_pin_path(struct bpf_map *map, const char *path)
1879{
1880 char buf[PATH_MAX];
1881 int err, len;
1882
1883 if (!path)
1884 path = "/sys/fs/bpf";
1885
1886 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1887 if (len < 0)
1888 return -EINVAL;
1889 else if (len >= PATH_MAX)
1890 return -ENAMETOOLONG;
1891
1892 err = bpf_map__set_pin_path(map, buf);
1893 if (err)
1894 return err;
1895
1896 return 0;
1897}
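
/* For example (illustrative), a map named "my_map" with a NULL path pins at
 * "/sys/fs/bpf/my_map"; passing pin_root_path = "/sys/fs/bpf/myapp" through
 * struct bpf_object_open_opts would yield "/sys/fs/bpf/myapp/my_map" instead.
 */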
1898
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001899static int bpf_object__init_user_btf_map(struct bpf_object *obj,
1900 const struct btf_type *sec,
1901 int var_idx, int sec_idx,
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001902 const Elf_Data *data, bool strict,
1903 const char *pin_root_path)
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001904{
1905 const struct btf_type *var, *def, *t;
1906 const struct btf_var_secinfo *vi;
1907 const struct btf_var *var_extra;
1908 const struct btf_member *m;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001909 const char *map_name;
1910 struct bpf_map *map;
1911 int vlen, i;
1912
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001913 vi = btf_var_secinfos(sec) + var_idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001914 var = btf__type_by_id(obj->btf, vi->type);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001915 var_extra = btf_var(var);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001916 map_name = btf__name_by_offset(obj->btf, var->name_off);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001917 vlen = btf_vlen(var);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001918
1919 if (map_name == NULL || map_name[0] == '\0') {
Kefeng Wangbe180102019-10-21 13:55:32 +08001920 pr_warn("map #%d: empty name.\n", var_idx);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001921 return -EINVAL;
1922 }
1923 if ((__u64)vi->offset + vi->size > data->d_size) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001924 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001925 return -EINVAL;
1926 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001927 if (!btf_is_var(var)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001928 pr_warn("map '%s': unexpected var kind %u.\n",
1929 map_name, btf_kind(var));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001930 return -EINVAL;
1931 }
1932 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
1933 var_extra->linkage != BTF_VAR_STATIC) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001934 pr_warn("map '%s': unsupported var linkage %u.\n",
1935 map_name, var_extra->linkage);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001936 return -EOPNOTSUPP;
1937 }
1938
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07001939 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001940 if (!btf_is_struct(def)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001941 pr_warn("map '%s': unexpected def kind %u.\n",
 1942 map_name, btf_kind(def));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001943 return -EINVAL;
1944 }
1945 if (def->size > vi->size) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001946 pr_warn("map '%s': invalid def size.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001947 return -EINVAL;
1948 }
1949
1950 map = bpf_object__add_map(obj);
1951 if (IS_ERR(map))
1952 return PTR_ERR(map);
1953 map->name = strdup(map_name);
1954 if (!map->name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001955 pr_warn("map '%s': failed to alloc map name.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001956 return -ENOMEM;
1957 }
1958 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1959 map->def.type = BPF_MAP_TYPE_UNSPEC;
1960 map->sec_idx = sec_idx;
1961 map->sec_offset = vi->offset;
1962 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
1963 map_name, map->sec_idx, map->sec_offset);
1964
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001965 vlen = btf_vlen(def);
1966 m = btf_members(def);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001967 for (i = 0; i < vlen; i++, m++) {
1968 const char *name = btf__name_by_offset(obj->btf, m->name_off);
1969
1970 if (!name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001971 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001972 return -EINVAL;
1973 }
1974 if (strcmp(name, "type") == 0) {
1975 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001976 &map->def.type))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001977 return -EINVAL;
1978 pr_debug("map '%s': found type = %u.\n",
1979 map_name, map->def.type);
1980 } else if (strcmp(name, "max_entries") == 0) {
1981 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001982 &map->def.max_entries))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001983 return -EINVAL;
1984 pr_debug("map '%s': found max_entries = %u.\n",
1985 map_name, map->def.max_entries);
1986 } else if (strcmp(name, "map_flags") == 0) {
1987 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001988 &map->def.map_flags))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001989 return -EINVAL;
1990 pr_debug("map '%s': found map_flags = %u.\n",
1991 map_name, map->def.map_flags);
1992 } else if (strcmp(name, "key_size") == 0) {
1993 __u32 sz;
1994
1995 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001996 &sz))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001997 return -EINVAL;
1998 pr_debug("map '%s': found key_size = %u.\n",
1999 map_name, sz);
2000 if (map->def.key_size && map->def.key_size != sz) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002001 pr_warn("map '%s': conflicting key size %u != %u.\n",
2002 map_name, map->def.key_size, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002003 return -EINVAL;
2004 }
2005 map->def.key_size = sz;
2006 } else if (strcmp(name, "key") == 0) {
2007 __s64 sz;
2008
2009 t = btf__type_by_id(obj->btf, m->type);
2010 if (!t) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002011 pr_warn("map '%s': key type [%d] not found.\n",
2012 map_name, m->type);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002013 return -EINVAL;
2014 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002015 if (!btf_is_ptr(t)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002016 pr_warn("map '%s': key spec is not PTR: %u.\n",
2017 map_name, btf_kind(t));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002018 return -EINVAL;
2019 }
2020 sz = btf__resolve_size(obj->btf, t->type);
2021 if (sz < 0) {
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002022 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2023 map_name, t->type, (ssize_t)sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002024 return sz;
2025 }
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002026 pr_debug("map '%s': found key [%u], sz = %zd.\n",
2027 map_name, t->type, (ssize_t)sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002028 if (map->def.key_size && map->def.key_size != sz) {
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002029 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2030 map_name, map->def.key_size, (ssize_t)sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002031 return -EINVAL;
2032 }
2033 map->def.key_size = sz;
2034 map->btf_key_type_id = t->type;
2035 } else if (strcmp(name, "value_size") == 0) {
2036 __u32 sz;
2037
2038 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07002039 &sz))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002040 return -EINVAL;
2041 pr_debug("map '%s': found value_size = %u.\n",
2042 map_name, sz);
2043 if (map->def.value_size && map->def.value_size != sz) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002044 pr_warn("map '%s': conflicting value size %u != %u.\n",
2045 map_name, map->def.value_size, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002046 return -EINVAL;
2047 }
2048 map->def.value_size = sz;
2049 } else if (strcmp(name, "value") == 0) {
2050 __s64 sz;
2051
2052 t = btf__type_by_id(obj->btf, m->type);
2053 if (!t) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002054 pr_warn("map '%s': value type [%d] not found.\n",
2055 map_name, m->type);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002056 return -EINVAL;
2057 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002058 if (!btf_is_ptr(t)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002059 pr_warn("map '%s': value spec is not PTR: %u.\n",
2060 map_name, btf_kind(t));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002061 return -EINVAL;
2062 }
2063 sz = btf__resolve_size(obj->btf, t->type);
2064 if (sz < 0) {
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002065 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2066 map_name, t->type, (ssize_t)sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002067 return sz;
2068 }
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002069 pr_debug("map '%s': found value [%u], sz = %zd.\n",
2070 map_name, t->type, (ssize_t)sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002071 if (map->def.value_size && map->def.value_size != sz) {
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002072 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2073 map_name, map->def.value_size, (ssize_t)sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002074 return -EINVAL;
2075 }
2076 map->def.value_size = sz;
2077 map->btf_value_type_id = t->type;
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002078 } else if (strcmp(name, "pinning") == 0) {
2079 __u32 val;
2080 int err;
2081
2082 if (!get_map_field_int(map_name, obj->btf, def, m,
2083 &val))
2084 return -EINVAL;
2085 pr_debug("map '%s': found pinning = %u.\n",
2086 map_name, val);
2087
2088 if (val != LIBBPF_PIN_NONE &&
2089 val != LIBBPF_PIN_BY_NAME) {
2090 pr_warn("map '%s': invalid pinning value %u.\n",
2091 map_name, val);
2092 return -EINVAL;
2093 }
2094 if (val == LIBBPF_PIN_BY_NAME) {
2095 err = build_map_pin_path(map, pin_root_path);
2096 if (err) {
2097 pr_warn("map '%s': couldn't build pin path.\n",
2098 map_name);
2099 return err;
2100 }
2101 }
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002102 } else {
2103 if (strict) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002104 pr_warn("map '%s': unknown field '%s'.\n",
2105 map_name, name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002106 return -ENOTSUP;
2107 }
2108 pr_debug("map '%s': ignoring unknown field '%s'.\n",
2109 map_name, name);
2110 }
2111 }
2112
2113 if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002114 pr_warn("map '%s': map type isn't specified.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002115 return -EINVAL;
2116 }
2117
2118 return 0;
2119}
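
/* Example (illustrative) of a complete BTF-defined map declaration accepted
 * by the parser above, using the __uint()/__type() convenience macros from
 * bpf_helpers.h:
 *
 *     struct {
 *         __uint(type, BPF_MAP_TYPE_HASH);
 *         __uint(max_entries, 1024);
 *         __type(key, __u32);
 *         __type(value, struct my_value);
 *         __uint(pinning, LIBBPF_PIN_BY_NAME);
 *     } my_map SEC(".maps");
 *
 * key/value sizes are derived from the pointed-to BTF types, and
 * LIBBPF_PIN_BY_NAME makes build_map_pin_path() set the pin path under
 * pin_root_path (default /sys/fs/bpf).
 */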
2120
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002121static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2122 const char *pin_root_path)
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002123{
2124 const struct btf_type *sec = NULL;
2125 int nr_types, i, vlen, err;
2126 const struct btf_type *t;
2127 const char *name;
2128 Elf_Data *data;
2129 Elf_Scn *scn;
2130
2131 if (obj->efile.btf_maps_shndx < 0)
2132 return 0;
2133
2134 scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
2135 if (scn)
2136 data = elf_getdata(scn, NULL);
2137 if (!scn || !data) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002138 pr_warn("failed to get Elf_Data from map section %d (%s)\n",
 2139 obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002140 return -EINVAL;
2141 }
2142
2143 nr_types = btf__get_nr_types(obj->btf);
2144 for (i = 1; i <= nr_types; i++) {
2145 t = btf__type_by_id(obj->btf, i);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002146 if (!btf_is_datasec(t))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002147 continue;
2148 name = btf__name_by_offset(obj->btf, t->name_off);
2149 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2150 sec = t;
2151 break;
2152 }
2153 }
2154
2155 if (!sec) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002156 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002157 return -ENOENT;
2158 }
2159
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002160 vlen = btf_vlen(sec);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002161 for (i = 0; i < vlen; i++) {
2162 err = bpf_object__init_user_btf_map(obj, sec, i,
2163 obj->efile.btf_maps_shndx,
Andrii Nakryiko8983b732019-11-20 23:07:42 -08002164 data, strict,
2165 pin_root_path);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002166 if (err)
2167 return err;
2168 }
2169
2170 return 0;
2171}
2172
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08002173static int bpf_object__init_maps(struct bpf_object *obj,
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08002174 const struct bpf_object_open_opts *opts)
Andrii Nakryikobf829272019-06-17 12:26:53 -07002175{
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002176 const char *pin_root_path;
2177 bool strict;
Andrii Nakryikobf829272019-06-17 12:26:53 -07002178 int err;
Eric Leblond4708bbd2016-11-15 04:05:47 +00002179
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002180 strict = !OPTS_GET(opts, relaxed_maps, false);
2181 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2182
Andrii Nakryikobf829272019-06-17 12:26:53 -07002183 err = bpf_object__init_user_maps(obj, strict);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002184 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2185 err = err ?: bpf_object__init_global_data_maps(obj);
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08002186 err = err ?: bpf_object__init_kconfig_map(obj);
Martin KaFai Lau590a0082020-01-08 16:35:14 -08002187 err = err ?: bpf_object__init_struct_ops_maps(obj);
Andrii Nakryikobf829272019-06-17 12:26:53 -07002188 if (err)
2189 return err;
2190
Andrii Nakryikobf829272019-06-17 12:26:53 -07002191 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +00002192}
2193
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01002194static bool section_have_execinstr(struct bpf_object *obj, int idx)
2195{
2196 Elf_Scn *scn;
2197 GElf_Shdr sh;
2198
2199 scn = elf_getscn(obj->efile.elf, idx);
2200 if (!scn)
2201 return false;
2202
2203 if (gelf_getshdr(scn, &sh) != &sh)
2204 return false;
2205
2206 if (sh.sh_flags & SHF_EXECINSTR)
2207 return true;
2208
2209 return false;
2210}
2211
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002212static void bpf_object__sanitize_btf(struct bpf_object *obj)
2213{
Alexei Starovoitov2d3eb672020-01-09 22:41:19 -08002214 bool has_func_global = obj->caps.btf_func_global;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002215 bool has_datasec = obj->caps.btf_datasec;
2216 bool has_func = obj->caps.btf_func;
2217 struct btf *btf = obj->btf;
2218 struct btf_type *t;
2219 int i, j, vlen;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002220
Alexei Starovoitov2d3eb672020-01-09 22:41:19 -08002221 if (!obj->btf || (has_func && has_datasec && has_func_global))
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002222 return;
2223
2224 for (i = 1; i <= btf__get_nr_types(btf); i++) {
2225 t = (struct btf_type *)btf__type_by_id(btf, i);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002226
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002227 if (!has_datasec && btf_is_var(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002228 /* replace VAR with INT */
2229 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
Andrii Nakryiko1d4126c2019-07-19 12:46:03 -07002230 /*
2231 * using size = 1 is the safest choice, 4 will be too
2232 * big and cause kernel BTF validation failure if
2233 * original variable took less than 4 bytes
2234 */
2235 t->size = 1;
Jakub Kicinski708852d2019-08-13 16:24:57 -07002236 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002237 } else if (!has_datasec && btf_is_datasec(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002238 /* replace DATASEC with STRUCT */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002239 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2240 struct btf_member *m = btf_members(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002241 struct btf_type *vt;
2242 char *name;
2243
2244 name = (char *)btf__name_by_offset(btf, t->name_off);
2245 while (*name) {
2246 if (*name == '.')
2247 *name = '_';
2248 name++;
2249 }
2250
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002251 vlen = btf_vlen(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002252 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2253 for (j = 0; j < vlen; j++, v++, m++) {
2254 /* order of field assignments is important */
2255 m->offset = v->offset * 8;
2256 m->type = v->type;
2257 /* preserve variable name as member name */
2258 vt = (void *)btf__type_by_id(btf, v->type);
2259 m->name_off = vt->name_off;
2260 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002261 } else if (!has_func && btf_is_func_proto(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002262 /* replace FUNC_PROTO with ENUM */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002263 vlen = btf_vlen(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002264 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2265 t->size = sizeof(__u32); /* kernel enforced */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07002266 } else if (!has_func && btf_is_func(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002267 /* replace FUNC with TYPEDEF */
2268 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
Alexei Starovoitov2d3eb672020-01-09 22:41:19 -08002269 } else if (!has_func_global && btf_is_func(t)) {
2270 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2271 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002272 }
2273 }
2274}
2275
2276static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
2277{
2278 if (!obj->btf_ext)
2279 return;
2280
2281 if (!obj->caps.btf_func) {
2282 btf_ext__free(obj->btf_ext);
2283 obj->btf_ext = NULL;
2284 }
2285}
2286
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002287static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
2288{
Andrii Nakryiko53276442020-02-19 22:26:35 -08002289 return obj->efile.st_ops_shndx >= 0 || obj->nr_extern > 0;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002290}
2291
Andrii Nakryiko063183b2019-06-17 12:26:55 -07002292static int bpf_object__init_btf(struct bpf_object *obj,
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002293 Elf_Data *btf_data,
2294 Elf_Data *btf_ext_data)
2295{
Andrii Nakryikob7d7f3e2020-01-16 22:07:59 -08002296 int err = -ENOENT;
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002297
2298 if (btf_data) {
2299 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2300 if (IS_ERR(obj->btf)) {
Andrii Nakryikob7d7f3e2020-01-16 22:07:59 -08002301 err = PTR_ERR(obj->btf);
2302 obj->btf = NULL;
Kefeng Wangbe180102019-10-21 13:55:32 +08002303 pr_warn("Error loading ELF section %s: %d.\n",
2304 BTF_ELF_SEC, err);
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002305 goto out;
2306 }
Andrii Nakryikob7d7f3e2020-01-16 22:07:59 -08002307 err = 0;
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002308 }
2309 if (btf_ext_data) {
2310 if (!obj->btf) {
2311 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2312 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2313 goto out;
2314 }
2315 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2316 btf_ext_data->d_size);
2317 if (IS_ERR(obj->btf_ext)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002318 pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
2319 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002320 obj->btf_ext = NULL;
2321 goto out;
2322 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002323 }
2324out:
Andrii Nakryikob7d7f3e2020-01-16 22:07:59 -08002325 if (err && bpf_object__is_btf_mandatory(obj)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002326 pr_warn("BTF is required, but is missing or corrupted.\n");
Andrii Nakryikob7d7f3e2020-01-16 22:07:59 -08002327 return err;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002328 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07002329 return 0;
2330}
2331
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002332static int bpf_object__finalize_btf(struct bpf_object *obj)
2333{
2334 int err;
2335
2336 if (!obj->btf)
2337 return 0;
2338
2339 err = btf__finalize_data(obj, obj->btf);
2340 if (!err)
2341 return 0;
2342
2343 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2344 btf__free(obj->btf);
2345 obj->btf = NULL;
2346 btf_ext__free(obj->btf_ext);
2347 obj->btf_ext = NULL;
2348
2349 if (bpf_object__is_btf_mandatory(obj)) {
2350 pr_warn("BTF is required, but is missing or corrupted.\n");
2351 return -ENOENT;
2352 }
2353 return 0;
2354}
2355
KP Singha6ed02c2020-01-17 22:28:25 +01002356static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
2357{
2358 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
2359 return true;
2360
2361 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2362 * also need vmlinux BTF
2363 */
2364 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2365 return true;
2366
2367 return false;
2368}
2369
2370static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
2371{
2372 struct bpf_program *prog;
2373 int err;
2374
2375 bpf_object__for_each_program(prog, obj) {
2376 if (libbpf_prog_needs_vmlinux_btf(prog)) {
2377 obj->btf_vmlinux = libbpf_find_kernel_btf();
2378 if (IS_ERR(obj->btf_vmlinux)) {
2379 err = PTR_ERR(obj->btf_vmlinux);
2380 pr_warn("Error loading vmlinux BTF: %d\n", err);
2381 obj->btf_vmlinux = NULL;
2382 return err;
2383 }
2384 return 0;
2385 }
2386 }
2387
2388 return 0;
2389}
2390
Andrii Nakryiko063183b2019-06-17 12:26:55 -07002391static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2392{
2393 int err = 0;
2394
2395 if (!obj->btf)
2396 return 0;
2397
2398 bpf_object__sanitize_btf(obj);
2399 bpf_object__sanitize_btf_ext(obj);
2400
2401 err = btf__load(obj->btf);
2402 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002403 pr_warn("Error loading %s into kernel: %d.\n",
2404 BTF_ELF_SEC, err);
Andrii Nakryiko063183b2019-06-17 12:26:55 -07002405 btf__free(obj->btf);
2406 obj->btf = NULL;
Andrii Nakryiko04efe592019-07-19 12:32:42 -07002407 /* btf_ext can't exist without btf, so free it as well */
2408 if (obj->btf_ext) {
2409 btf_ext__free(obj->btf_ext);
2410 obj->btf_ext = NULL;
2411 }
2412
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002413 if (bpf_object__is_btf_mandatory(obj))
2414 return err;
Andrii Nakryiko063183b2019-06-17 12:26:55 -07002415 }
2416 return 0;
2417}
2418
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08002419static int bpf_object__elf_collect(struct bpf_object *obj)
Wang Nan29603662015-07-01 02:13:56 +00002420{
2421 Elf *elf = obj->efile.elf;
2422 GElf_Ehdr *ep = &obj->efile.ehdr;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002423 Elf_Data *btf_ext_data = NULL;
Daniel Borkmann1713d682019-04-09 23:20:14 +02002424 Elf_Data *btf_data = NULL;
Wang Nan29603662015-07-01 02:13:56 +00002425 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +00002426 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +00002427
2428 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
2429 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002430 pr_warn("failed to get e_shstrndx from %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00002431 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00002432 }
2433
2434 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2435 char *name;
2436 GElf_Shdr sh;
2437 Elf_Data *data;
2438
2439 idx++;
2440 if (gelf_getshdr(scn, &sh) != &sh) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002441 pr_warn("failed to get section(%d) header from %s\n",
2442 idx, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002443 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00002444 }
2445
2446 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
2447 if (!name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002448 pr_warn("failed to get section(%d) name from %s\n",
2449 idx, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002450 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00002451 }
2452
2453 data = elf_getdata(scn, 0);
2454 if (!data) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002455 pr_warn("failed to get section(%d) data from %s(%s)\n",
2456 idx, name, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002457 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00002458 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01002459 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
2460 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +00002461 (int)sh.sh_link, (unsigned long)sh.sh_flags,
2462 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +00002463
Daniel Borkmann1713d682019-04-09 23:20:14 +02002464 if (strcmp(name, "license") == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00002465 err = bpf_object__init_license(obj,
2466 data->d_buf,
2467 data->d_size);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002468 if (err)
2469 return err;
Daniel Borkmann1713d682019-04-09 23:20:14 +02002470 } else if (strcmp(name, "version") == 0) {
John Fastabend54b86252019-10-18 07:41:26 -07002471 err = bpf_object__init_kversion(obj,
2472 data->d_buf,
2473 data->d_size);
2474 if (err)
2475 return err;
Daniel Borkmann1713d682019-04-09 23:20:14 +02002476 } else if (strcmp(name, "maps") == 0) {
Wang Nan666810e2016-01-25 09:55:49 +00002477 obj->efile.maps_shndx = idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002478 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2479 obj->efile.btf_maps_shndx = idx;
Daniel Borkmann1713d682019-04-09 23:20:14 +02002480 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2481 btf_data = data;
Yonghong Song2993e052018-11-19 15:29:16 -08002482 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002483 btf_ext_data = data;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002484 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +00002485 if (obj->efile.symbols) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002486 pr_warn("bpf: multiple SYMTAB in %s\n",
2487 obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002488 return -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00002489 }
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002490 obj->efile.symbols = data;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002491 obj->efile.symbols_shndx = idx;
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002492 obj->efile.strtabidx = sh.sh_link;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02002493 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2494 if (sh.sh_flags & SHF_EXECINSTR) {
2495 if (strcmp(name, ".text") == 0)
2496 obj->efile.text_shndx = idx;
2497 err = bpf_object__add_program(obj, data->d_buf,
Andrii Nakryiko8983b732019-11-20 23:07:42 -08002498 data->d_size,
2499 name, idx);
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02002500 if (err) {
2501 char errmsg[STRERR_BUFSIZE];
Andrii Nakryiko8983b732019-11-20 23:07:42 -08002502 char *cp;
Wang Nan6371ca3b2015-11-06 13:49:37 +00002503
Andrii Nakryiko8983b732019-11-20 23:07:42 -08002504 cp = libbpf_strerror_r(-err, errmsg,
2505 sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002506 pr_warn("failed to alloc program %s (%s): %s",
2507 name, obj->path, cp);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002508 return err;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02002509 }
Andrii Nakryikoac9d1382019-12-13 17:47:07 -08002510 } else if (strcmp(name, DATA_SEC) == 0) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02002511 obj->efile.data = data;
2512 obj->efile.data_shndx = idx;
Andrii Nakryikoac9d1382019-12-13 17:47:07 -08002513 } else if (strcmp(name, RODATA_SEC) == 0) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02002514 obj->efile.rodata = data;
2515 obj->efile.rodata_shndx = idx;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08002516 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2517 obj->efile.st_ops_data = data;
2518 obj->efile.st_ops_shndx = idx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002519 } else {
2520 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nana5b8bd42015-07-01 02:14:00 +00002521 }
Wang Nanb62f06e2015-07-01 02:14:01 +00002522 } else if (sh.sh_type == SHT_REL) {
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002523 int nr_sects = obj->efile.nr_reloc_sects;
2524 void *sects = obj->efile.reloc_sects;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01002525 int sec = sh.sh_info; /* points to other section */
2526
2527 /* Only do relo for section with exec instructions */
Martin KaFai Lau590a0082020-01-08 16:35:14 -08002528 if (!section_have_execinstr(obj, sec) &&
2529 strcmp(name, ".rel" STRUCT_OPS_SEC)) {
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01002530 pr_debug("skip relo %s(%d) for section(%d)\n",
2531 name, idx, sec);
2532 continue;
2533 }
Wang Nanb62f06e2015-07-01 02:14:01 +00002534
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002535 sects = reallocarray(sects, nr_sects + 1,
2536 sizeof(*obj->efile.reloc_sects));
2537 if (!sects) {
2538 pr_warn("reloc_sects realloc failed\n");
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002539 return -ENOMEM;
Wang Nanb62f06e2015-07-01 02:14:01 +00002540 }
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002541
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002542 obj->efile.reloc_sects = sects;
2543 obj->efile.nr_reloc_sects++;
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07002544
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002545 obj->efile.reloc_sects[nr_sects].shdr = sh;
2546 obj->efile.reloc_sects[nr_sects].data = data;
Andrii Nakryikoac9d1382019-12-13 17:47:07 -08002547 } else if (sh.sh_type == SHT_NOBITS &&
2548 strcmp(name, BSS_SEC) == 0) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02002549 obj->efile.bss = data;
2550 obj->efile.bss_shndx = idx;
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01002551 } else {
2552 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +00002553 }
Wang Nan29603662015-07-01 02:13:56 +00002554 }
Wang Nan561bbcc2015-11-27 08:47:36 +00002555
Andrii Nakryikod3a3aa02019-10-28 16:37:27 -07002556 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002557 pr_warn("Corrupted ELF file: index of strtab invalid\n");
Andrii Nakryikof1021542019-05-29 10:36:07 -07002558 return -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00002559 }
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08002560 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
Wang Nan29603662015-07-01 02:13:56 +00002561}
2562
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002563static bool sym_is_extern(const GElf_Sym *sym)
2564{
2565 int bind = GELF_ST_BIND(sym->st_info);
2566 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
2567 return sym->st_shndx == SHN_UNDEF &&
2568 (bind == STB_GLOBAL || bind == STB_WEAK) &&
2569 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
2570}
2571
2572static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
2573{
2574 const struct btf_type *t;
2575 const char *var_name;
2576 int i, n;
2577
2578 if (!btf)
2579 return -ESRCH;
2580
2581 n = btf__get_nr_types(btf);
2582 for (i = 1; i <= n; i++) {
2583 t = btf__type_by_id(btf, i);
2584
2585 if (!btf_is_var(t))
2586 continue;
2587
2588 var_name = btf__name_by_offset(btf, t->name_off);
2589 if (strcmp(var_name, ext_name))
2590 continue;
2591
2592 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
2593 return -EINVAL;
2594
2595 return i;
2596 }
2597
2598 return -ENOENT;
2599}
2600
2601static enum extern_type find_extern_type(const struct btf *btf, int id,
2602 bool *is_signed)
2603{
2604 const struct btf_type *t;
2605 const char *name;
2606
2607 t = skip_mods_and_typedefs(btf, id, NULL);
2608 name = btf__name_by_offset(btf, t->name_off);
2609
2610 if (is_signed)
2611 *is_signed = false;
2612 switch (btf_kind(t)) {
2613 case BTF_KIND_INT: {
2614 int enc = btf_int_encoding(t);
2615
2616 if (enc & BTF_INT_BOOL)
2617 return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN;
2618 if (is_signed)
2619 *is_signed = enc & BTF_INT_SIGNED;
2620 if (t->size == 1)
2621 return EXT_CHAR;
2622 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
2623 return EXT_UNKNOWN;
2624 return EXT_INT;
2625 }
2626 case BTF_KIND_ENUM:
2627 if (t->size != 4)
2628 return EXT_UNKNOWN;
2629 if (strcmp(name, "libbpf_tristate"))
2630 return EXT_UNKNOWN;
2631 return EXT_TRISTATE;
2632 case BTF_KIND_ARRAY:
2633 if (btf_array(t)->nelems == 0)
2634 return EXT_UNKNOWN;
2635 if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR)
2636 return EXT_UNKNOWN;
2637 return EXT_CHAR_ARR;
2638 default:
2639 return EXT_UNKNOWN;
2640 }
2641}
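
/* Examples (illustrative, using the __kconfig section attribute convention
 * from bpf_helpers.h):
 *
 *     extern int CONFIG_HZ __kconfig;                          -> EXT_INT
 *     extern bool CONFIG_BPF_JIT_ALWAYS_ON __kconfig;          -> EXT_BOOL
 *     extern enum libbpf_tristate CONFIG_IPV6 __kconfig;       -> EXT_TRISTATE
 *     extern char CONFIG_DEFAULT_HOSTNAME[64] __kconfig;       -> EXT_CHAR_ARR
 */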
2642
2643static int cmp_externs(const void *_a, const void *_b)
2644{
2645 const struct extern_desc *a = _a;
2646 const struct extern_desc *b = _b;
2647
2648 /* descending order by alignment requirements */
2649 if (a->align != b->align)
2650 return a->align > b->align ? -1 : 1;
2651 /* ascending order by size, within same alignment class */
2652 if (a->sz != b->sz)
2653 return a->sz < b->sz ? -1 : 1;
2654 /* resolve ties by name */
2655 return strcmp(a->name, b->name);
2656}
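
/* Illustration: three externs of type __u64, int and bool sort in that order
 * (descending alignment) and are then laid out by the loop in
 * bpf_object__collect_externs() below at offsets 0, 8 and 12 of the .kconfig
 * map's value, wasting no padding between them.
 */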
2657
2658static int bpf_object__collect_externs(struct bpf_object *obj)
2659{
2660 const struct btf_type *t;
2661 struct extern_desc *ext;
2662 int i, n, off, btf_id;
2663 struct btf_type *sec;
2664 const char *ext_name;
2665 Elf_Scn *scn;
2666 GElf_Shdr sh;
2667
2668 if (!obj->efile.symbols)
2669 return 0;
2670
2671 scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
2672 if (!scn)
2673 return -LIBBPF_ERRNO__FORMAT;
2674 if (gelf_getshdr(scn, &sh) != &sh)
2675 return -LIBBPF_ERRNO__FORMAT;
2676 n = sh.sh_size / sh.sh_entsize;
2677
2678 pr_debug("looking for externs among %d symbols...\n", n);
2679 for (i = 0; i < n; i++) {
2680 GElf_Sym sym;
2681
2682 if (!gelf_getsym(obj->efile.symbols, i, &sym))
2683 return -LIBBPF_ERRNO__FORMAT;
2684 if (!sym_is_extern(&sym))
2685 continue;
2686 ext_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
2687 sym.st_name);
2688 if (!ext_name || !ext_name[0])
2689 continue;
2690
2691 ext = obj->externs;
2692 ext = reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
2693 if (!ext)
2694 return -ENOMEM;
2695 obj->externs = ext;
2696 ext = &ext[obj->nr_extern];
2697 memset(ext, 0, sizeof(*ext));
2698 obj->nr_extern++;
2699
2700 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
2701 if (ext->btf_id <= 0) {
2702 pr_warn("failed to find BTF for extern '%s': %d\n",
2703 ext_name, ext->btf_id);
2704 return ext->btf_id;
2705 }
2706 t = btf__type_by_id(obj->btf, ext->btf_id);
2707 ext->name = btf__name_by_offset(obj->btf, t->name_off);
2708 ext->sym_idx = i;
2709 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
2710 ext->sz = btf__resolve_size(obj->btf, t->type);
2711 if (ext->sz <= 0) {
2712 pr_warn("failed to resolve size of extern '%s': %d\n",
2713 ext_name, ext->sz);
2714 return ext->sz;
2715 }
2716 ext->align = btf__align_of(obj->btf, t->type);
2717 if (ext->align <= 0) {
2718 pr_warn("failed to determine alignment of extern '%s': %d\n",
2719 ext_name, ext->align);
2720 return -EINVAL;
2721 }
2722 ext->type = find_extern_type(obj->btf, t->type,
2723 &ext->is_signed);
2724 if (ext->type == EXT_UNKNOWN) {
2725 pr_warn("extern '%s' type is unsupported\n", ext_name);
2726 return -ENOTSUP;
2727 }
2728 }
2729 pr_debug("collected %d externs total\n", obj->nr_extern);
2730
2731 if (!obj->nr_extern)
2732 return 0;
2733
2734 /* sort externs by (alignment, size, name) and calculate their offsets
2735 * within a map */
2736 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
2737 off = 0;
2738 for (i = 0; i < obj->nr_extern; i++) {
2739 ext = &obj->externs[i];
2740 ext->data_off = roundup(off, ext->align);
2741 off = ext->data_off + ext->sz;
2742 pr_debug("extern #%d: symbol %d, off %u, name %s\n",
2743 i, ext->sym_idx, ext->data_off, ext->name);
2744 }
2745
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08002746 btf_id = btf__find_by_name(obj->btf, KCONFIG_SEC);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002747 if (btf_id <= 0) {
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08002748 pr_warn("no BTF info found for '%s' datasec\n", KCONFIG_SEC);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002749 return -ESRCH;
2750 }
2751
2752 sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id);
2753 sec->size = off;
2754 n = btf_vlen(sec);
2755 for (i = 0; i < n; i++) {
2756 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
2757
2758 t = btf__type_by_id(obj->btf, vs->type);
2759 ext_name = btf__name_by_offset(obj->btf, t->name_off);
2760 ext = find_extern_by_name(obj, ext_name);
2761 if (!ext) {
2762 pr_warn("failed to find extern definition for BTF var '%s'\n",
2763 ext_name);
2764 return -ESRCH;
2765 }
2766 vs->offset = ext->data_off;
2767 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
2768 }
2769
2770 return 0;
2771}
2772
Wang Nan34090912015-07-01 02:14:02 +00002773static struct bpf_program *
2774bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
2775{
2776 struct bpf_program *prog;
2777 size_t i;
2778
2779 for (i = 0; i < obj->nr_programs; i++) {
2780 prog = &obj->programs[i];
2781 if (prog->idx == idx)
2782 return prog;
2783 }
2784 return NULL;
2785}
2786
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07002787struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07002788bpf_object__find_program_by_title(const struct bpf_object *obj,
2789 const char *title)
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07002790{
2791 struct bpf_program *pos;
2792
2793 bpf_object__for_each_program(pos, obj) {
2794 if (pos->section_name && !strcmp(pos->section_name, title))
2795 return pos;
2796 }
2797 return NULL;
2798}
2799
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08002800struct bpf_program *
2801bpf_object__find_program_by_name(const struct bpf_object *obj,
2802 const char *name)
2803{
2804 struct bpf_program *prog;
2805
2806 bpf_object__for_each_program(prog, obj) {
2807 if (!strcmp(prog->name, name))
2808 return prog;
2809 }
2810 return NULL;
2811}
2812
Daniel Borkmannd8599002019-04-09 23:20:13 +02002813static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
2814 int shndx)
2815{
2816 return shndx == obj->efile.data_shndx ||
2817 shndx == obj->efile.bss_shndx ||
2818 shndx == obj->efile.rodata_shndx;
2819}
2820
2821static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
2822 int shndx)
2823{
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002824 return shndx == obj->efile.maps_shndx ||
2825 shndx == obj->efile.btf_maps_shndx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002826}
2827
Daniel Borkmannd8599002019-04-09 23:20:13 +02002828static enum libbpf_map_type
2829bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
2830{
2831 if (shndx == obj->efile.data_shndx)
2832 return LIBBPF_MAP_DATA;
2833 else if (shndx == obj->efile.bss_shndx)
2834 return LIBBPF_MAP_BSS;
2835 else if (shndx == obj->efile.rodata_shndx)
2836 return LIBBPF_MAP_RODATA;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002837 else if (shndx == obj->efile.symbols_shndx)
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08002838 return LIBBPF_MAP_KCONFIG;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002839 else
2840 return LIBBPF_MAP_UNSPEC;
2841}
2842
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002843static int bpf_program__record_reloc(struct bpf_program *prog,
2844 struct reloc_desc *reloc_desc,
2845 __u32 insn_idx, const char *name,
2846 const GElf_Sym *sym, const GElf_Rel *rel)
2847{
2848 struct bpf_insn *insn = &prog->insns[insn_idx];
2849 size_t map_idx, nr_maps = prog->obj->nr_maps;
2850 struct bpf_object *obj = prog->obj;
2851 __u32 shdr_idx = sym->st_shndx;
2852 enum libbpf_map_type type;
2853 struct bpf_map *map;
2854
2855 /* sub-program call relocation */
2856 if (insn->code == (BPF_JMP | BPF_CALL)) {
2857 if (insn->src_reg != BPF_PSEUDO_CALL) {
2858 pr_warn("incorrect bpf_call opcode\n");
2859 return -LIBBPF_ERRNO__RELOC;
2860 }
2861 /* text_shndx can be 0, if no default "main" program exists */
2862 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
2863 pr_warn("bad call relo against section %u\n", shdr_idx);
2864 return -LIBBPF_ERRNO__RELOC;
2865 }
2866 if (sym->st_value % 8) {
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002867 pr_warn("bad call relo offset: %zu\n",
2868 (size_t)sym->st_value);
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002869 return -LIBBPF_ERRNO__RELOC;
2870 }
2871 reloc_desc->type = RELO_CALL;
2872 reloc_desc->insn_idx = insn_idx;
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08002873 reloc_desc->sym_off = sym->st_value;
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002874 obj->has_pseudo_calls = true;
2875 return 0;
2876 }
2877
2878 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
Andrii Nakryiko8983b732019-11-20 23:07:42 -08002879 pr_warn("invalid relo for insns[%d].code 0x%x\n",
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002880 insn_idx, insn->code);
2881 return -LIBBPF_ERRNO__RELOC;
2882 }
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002883
2884 if (sym_is_extern(sym)) {
2885 int sym_idx = GELF_R_SYM(rel->r_info);
2886 int i, n = obj->nr_extern;
2887 struct extern_desc *ext;
2888
2889 for (i = 0; i < n; i++) {
2890 ext = &obj->externs[i];
2891 if (ext->sym_idx == sym_idx)
2892 break;
2893 }
2894 if (i >= n) {
2895 pr_warn("extern relo failed to find extern for sym %d\n",
2896 sym_idx);
2897 return -LIBBPF_ERRNO__RELOC;
2898 }
2899 pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n",
2900 i, ext->name, ext->sym_idx, ext->data_off, insn_idx);
2901 reloc_desc->type = RELO_EXTERN;
2902 reloc_desc->insn_idx = insn_idx;
2903 reloc_desc->sym_off = ext->data_off;
2904 return 0;
2905 }
2906
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002907 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
Andrii Nakryiko393cdfb2019-11-20 23:07:43 -08002908 pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
2909 name, shdr_idx);
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002910 return -LIBBPF_ERRNO__RELOC;
2911 }
2912
2913 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
2914
2915 /* generic map reference relocation */
2916 if (type == LIBBPF_MAP_UNSPEC) {
2917 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
2918 pr_warn("bad map relo against section %u\n",
2919 shdr_idx);
2920 return -LIBBPF_ERRNO__RELOC;
2921 }
2922 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
2923 map = &obj->maps[map_idx];
2924 if (map->libbpf_type != type ||
2925 map->sec_idx != sym->st_shndx ||
2926 map->sec_offset != sym->st_value)
2927 continue;
2928 pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
2929 map_idx, map->name, map->sec_idx,
2930 map->sec_offset, insn_idx);
2931 break;
2932 }
2933 if (map_idx >= nr_maps) {
Andrii Nakryiko679152d2019-12-12 09:19:18 -08002934 pr_warn("map relo failed to find map for sec %u, off %zu\n",
2935 shdr_idx, (size_t)sym->st_value);
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002936 return -LIBBPF_ERRNO__RELOC;
2937 }
2938 reloc_desc->type = RELO_LD64;
2939 reloc_desc->insn_idx = insn_idx;
2940 reloc_desc->map_idx = map_idx;
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08002941 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002942 return 0;
2943 }
2944
2945 /* global data map relocation */
2946 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
2947 pr_warn("bad data relo against section %u\n", shdr_idx);
2948 return -LIBBPF_ERRNO__RELOC;
2949 }
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002950 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
2951 map = &obj->maps[map_idx];
2952 if (map->libbpf_type != type)
2953 continue;
2954 pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
2955 map_idx, map->name, map->sec_idx, map->sec_offset,
2956 insn_idx);
2957 break;
2958 }
2959 if (map_idx >= nr_maps) {
2960 pr_warn("data relo failed to find map for sec %u\n",
2961 shdr_idx);
2962 return -LIBBPF_ERRNO__RELOC;
2963 }
2964
2965 reloc_desc->type = RELO_DATA;
2966 reloc_desc->insn_idx = insn_idx;
2967 reloc_desc->map_idx = map_idx;
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08002968 reloc_desc->sym_off = sym->st_value;
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002969 return 0;
2970}
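
/* Example (a sketch, not from any specific object file) of how the
 * relocation kinds recorded above map to BPF C code; 'my_map', 'my_var',
 * 'EXTERN_CONFIG_FOO' and 'helper' are hypothetical names:
 *
 *	value = bpf_map_lookup_elem(&my_map, &key); // ld_imm64 -> RELO_LD64
 *	my_var += 1;                                // .data access -> RELO_DATA
 *	if (EXTERN_CONFIG_FOO)                      // extern -> RELO_EXTERN
 *		helper();                           // BPF-to-BPF call -> RELO_CALL
 */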
2971
Wang Nan34090912015-07-01 02:14:02 +00002972static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002973bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
2974 Elf_Data *data, struct bpf_object *obj)
Wang Nan34090912015-07-01 02:14:02 +00002975{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002976 Elf_Data *symbols = obj->efile.symbols;
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002977 int err, i, nrels;
Wang Nan34090912015-07-01 02:14:02 +00002978
Andrii Nakryiko399dc652019-05-29 10:36:11 -07002979 pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
Wang Nan34090912015-07-01 02:14:02 +00002980 nrels = shdr->sh_size / shdr->sh_entsize;
2981
2982 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
2983 if (!prog->reloc_desc) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002984 pr_warn("failed to alloc memory in relocation\n");
Wang Nan34090912015-07-01 02:14:02 +00002985 return -ENOMEM;
2986 }
2987 prog->nr_reloc = nrels;
2988
2989 for (i = 0; i < nrels; i++) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02002990 const char *name;
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08002991 __u32 insn_idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002992 GElf_Sym sym;
2993 GElf_Rel rel;
Wang Nan34090912015-07-01 02:14:02 +00002994
2995 if (!gelf_getrel(data, i, &rel)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002996 pr_warn("relocation: failed to get %d reloc\n", i);
Wang Nan6371ca3b2015-11-06 13:49:37 +00002997 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00002998 }
Andrii Nakryiko399dc652019-05-29 10:36:11 -07002999 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003000 pr_warn("relocation: symbol %"PRIx64" not found\n",
3001 GELF_R_SYM(rel.r_info));
Wang Nan6371ca3b2015-11-06 13:49:37 +00003002 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00003003 }
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08003004 if (rel.r_offset % sizeof(struct bpf_insn))
3005 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00003006
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08003007 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
Daniel Borkmannd8599002019-04-09 23:20:13 +02003008 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
3009 sym.st_name) ? : "<?>";
3010
Andrii Nakryiko679152d2019-12-12 09:19:18 -08003011 pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
3012 (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info),
3013 (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info),
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08003014 GELF_ST_BIND(sym.st_info), sym.st_name, name,
3015 insn_idx);
Daniel Borkmannd8599002019-04-09 23:20:13 +02003016
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08003017 err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
3018 insn_idx, name, &sym, &rel);
3019 if (err)
3020 return err;
Wang Nan34090912015-07-01 02:14:02 +00003021 }
3022 return 0;
3023}
3024
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07003025static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003026{
3027 struct bpf_map_def *def = &map->def;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003028 __u32 key_type_id = 0, value_type_id = 0;
Yonghong Song96408c42019-02-04 11:00:58 -08003029 int ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003030
	/* If it's a BTF-defined map, we don't need to search for type IDs.
	 * A struct_ops map does not need btf_key_type_id and
	 * btf_value_type_id either.
	 */
3035 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3036 bpf_map__is_struct_ops(map))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07003037 return 0;
3038
Daniel Borkmannd8599002019-04-09 23:20:13 +02003039 if (!bpf_map__is_internal(map)) {
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07003040 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
Daniel Borkmannd8599002019-04-09 23:20:13 +02003041 def->value_size, &key_type_id,
3042 &value_type_id);
3043 } else {
3044 /*
3045 * LLVM annotates global data differently in BTF, that is,
3046 * only as '.data', '.bss' or '.rodata'.
3047 */
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07003048 ret = btf__find_by_name(obj->btf,
Daniel Borkmannd8599002019-04-09 23:20:13 +02003049 libbpf_type_to_btf_name[map->libbpf_type]);
3050 }
3051 if (ret < 0)
Yonghong Song96408c42019-02-04 11:00:58 -08003052 return ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003053
Yonghong Song96408c42019-02-04 11:00:58 -08003054 map->btf_key_type_id = key_type_id;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003055 map->btf_value_type_id = bpf_map__is_internal(map) ?
3056 ret : value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003057 return 0;
3058}
3059
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003060int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3061{
3062 struct bpf_map_info info = {};
3063 __u32 len = sizeof(info);
3064 int new_fd, err;
3065 char *new_name;
3066
3067 err = bpf_obj_get_info_by_fd(fd, &info, &len);
3068 if (err)
3069 return err;
3070
3071 new_name = strdup(info.name);
3072 if (!new_name)
3073 return -errno;
3074
3075 new_fd = open("/", O_RDONLY | O_CLOEXEC);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003076 if (new_fd < 0) {
3077 err = -errno;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003078 goto err_free_new_name;
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003079 }
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003080
3081 new_fd = dup3(fd, new_fd, O_CLOEXEC);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003082 if (new_fd < 0) {
3083 err = -errno;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003084 goto err_close_new_fd;
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003085 }
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003086
3087 err = zclose(map->fd);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003088 if (err) {
3089 err = -errno;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003090 goto err_close_new_fd;
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003091 }
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003092 free(map->name);
3093
3094 map->fd = new_fd;
3095 map->name = new_name;
3096 map->def.type = info.type;
3097 map->def.key_size = info.key_size;
3098 map->def.value_size = info.value_size;
3099 map->def.max_entries = info.max_entries;
3100 map->def.map_flags = info.map_flags;
3101 map->btf_key_type_id = info.btf_key_type_id;
3102 map->btf_value_type_id = info.btf_value_type_id;
Toke Høiland-Jørgensenec6d5f472019-11-09 21:37:27 +01003103 map->reused = true;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003104
3105 return 0;
3106
3107err_close_new_fd:
3108 close(new_fd);
3109err_free_new_name:
3110 free(new_name);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01003111 return err;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003112}
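
/* Illustrative usage sketch (not part of libbpf): making a map from a
 * freshly opened object reuse an already pinned map before load; the pin
 * path and map name are hypothetical:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (map && pin_fd >= 0)
 *		err = bpf_map__reuse_fd(map, pin_fd);
 *
 * After this, bpf_object__load() will skip creating "my_map" and use the
 * pinned map's fd instead (see bpf_object__create_maps()).
 */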
3113
Andrey Ignatov1a11a4c2019-02-14 15:01:42 -08003114int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3115{
3116 if (!map || !max_entries)
3117 return -EINVAL;
3118
3119 /* If map already created, its attributes can't be changed. */
3120 if (map->fd >= 0)
3121 return -EBUSY;
3122
3123 map->def.max_entries = max_entries;
3124
3125 return 0;
3126}
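
/* Illustrative usage sketch (not part of libbpf): sizing a map to the
 * number of possible CPUs after open but before load; the map name is
 * hypothetical:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");
 *	int n = libbpf_num_possible_cpus();
 *
 *	if (map && n > 0)
 *		err = bpf_map__resize(map, n);
 */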
3127
Wang Nan52d33522015-07-01 02:14:04 +00003128static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08003129bpf_object__probe_name(struct bpf_object *obj)
3130{
3131 struct bpf_load_program_attr attr;
3132 char *cp, errmsg[STRERR_BUFSIZE];
3133 struct bpf_insn insns[] = {
3134 BPF_MOV64_IMM(BPF_REG_0, 0),
3135 BPF_EXIT_INSN(),
3136 };
3137 int ret;
3138
3139 /* make sure basic loading works */
3140
3141 memset(&attr, 0, sizeof(attr));
3142 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3143 attr.insns = insns;
3144 attr.insns_cnt = ARRAY_SIZE(insns);
3145 attr.license = "GPL";
3146
3147 ret = bpf_load_program_xattr(&attr, NULL, 0);
3148 if (ret < 0) {
3149 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08003150 pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
3151 __func__, cp, errno);
Stanislav Fomichev47eff612018-11-20 17:11:19 -08003152 return -errno;
3153 }
3154 close(ret);
3155
3156 /* now try the same program, but with the name */
3157
3158 attr.name = "test";
3159 ret = bpf_load_program_xattr(&attr, NULL, 0);
3160 if (ret >= 0) {
3161 obj->caps.name = 1;
3162 close(ret);
3163 }
3164
3165 return 0;
3166}
3167
3168static int
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003169bpf_object__probe_global_data(struct bpf_object *obj)
3170{
3171 struct bpf_load_program_attr prg_attr;
3172 struct bpf_create_map_attr map_attr;
3173 char *cp, errmsg[STRERR_BUFSIZE];
3174 struct bpf_insn insns[] = {
3175 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3176 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3177 BPF_MOV64_IMM(BPF_REG_0, 0),
3178 BPF_EXIT_INSN(),
3179 };
3180 int ret, map;
3181
3182 memset(&map_attr, 0, sizeof(map_attr));
3183 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3184 map_attr.key_size = sizeof(int);
3185 map_attr.value_size = 32;
3186 map_attr.max_entries = 1;
3187
3188 map = bpf_create_map_xattr(&map_attr);
3189 if (map < 0) {
3190 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08003191 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3192 __func__, cp, errno);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003193 return -errno;
3194 }
3195
3196 insns[0].imm = map;
3197
3198 memset(&prg_attr, 0, sizeof(prg_attr));
3199 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3200 prg_attr.insns = insns;
3201 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3202 prg_attr.license = "GPL";
3203
3204 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
3205 if (ret >= 0) {
3206 obj->caps.global_data = 1;
3207 close(ret);
3208 }
3209
3210 close(map);
3211 return 0;
3212}
3213
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003214static int bpf_object__probe_btf_func(struct bpf_object *obj)
3215{
Andrii Nakryiko8983b732019-11-20 23:07:42 -08003216 static const char strs[] = "\0int\0x\0a";
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003217 /* void x(int a) {} */
3218 __u32 types[] = {
3219 /* int */
3220 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3221 /* FUNC_PROTO */ /* [2] */
3222 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3223 BTF_PARAM_ENC(7, 1),
3224 /* FUNC x */ /* [3] */
3225 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3226 };
Michal Rosteckicfd49212019-05-29 20:31:09 +02003227 int btf_fd;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003228
Michal Rosteckicfd49212019-05-29 20:31:09 +02003229 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
3230 strs, sizeof(strs));
3231 if (btf_fd >= 0) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003232 obj->caps.btf_func = 1;
Michal Rosteckicfd49212019-05-29 20:31:09 +02003233 close(btf_fd);
3234 return 1;
3235 }
3236
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003237 return 0;
3238}
3239
Alexei Starovoitov2d3eb672020-01-09 22:41:19 -08003240static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
3241{
3242 static const char strs[] = "\0int\0x\0a";
3243 /* static void x(int a) {} */
3244 __u32 types[] = {
3245 /* int */
3246 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3247 /* FUNC_PROTO */ /* [2] */
3248 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3249 BTF_PARAM_ENC(7, 1),
3250 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
3251 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3252 };
3253 int btf_fd;
3254
3255 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
3256 strs, sizeof(strs));
3257 if (btf_fd >= 0) {
3258 obj->caps.btf_func_global = 1;
3259 close(btf_fd);
3260 return 1;
3261 }
3262
3263 return 0;
3264}
3265
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003266static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
3267{
Andrii Nakryiko8983b732019-11-20 23:07:42 -08003268 static const char strs[] = "\0x\0.data";
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003269 /* static int a; */
3270 __u32 types[] = {
3271 /* int */
3272 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3273 /* VAR x */ /* [2] */
3274 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3275 BTF_VAR_STATIC,
3276 /* DATASEC val */ /* [3] */
3277 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3278 BTF_VAR_SECINFO_ENC(2, 0, 4),
3279 };
Michal Rosteckicfd49212019-05-29 20:31:09 +02003280 int btf_fd;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003281
Michal Rosteckicfd49212019-05-29 20:31:09 +02003282 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
3283 strs, sizeof(strs));
3284 if (btf_fd >= 0) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003285 obj->caps.btf_datasec = 1;
Michal Rosteckicfd49212019-05-29 20:31:09 +02003286 close(btf_fd);
3287 return 1;
3288 }
3289
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003290 return 0;
3291}
3292
Andrii Nakryiko7fe74b42019-11-17 09:28:05 -08003293static int bpf_object__probe_array_mmap(struct bpf_object *obj)
3294{
3295 struct bpf_create_map_attr attr = {
3296 .map_type = BPF_MAP_TYPE_ARRAY,
3297 .map_flags = BPF_F_MMAPABLE,
3298 .key_size = sizeof(int),
3299 .value_size = sizeof(int),
3300 .max_entries = 1,
3301 };
3302 int fd;
3303
3304 fd = bpf_create_map_xattr(&attr);
3305 if (fd >= 0) {
3306 obj->caps.array_mmap = 1;
3307 close(fd);
3308 return 1;
3309 }
3310
3311 return 0;
3312}
3313
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003314static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08003315bpf_object__probe_caps(struct bpf_object *obj)
3316{
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003317 int (*probe_fn[])(struct bpf_object *obj) = {
3318 bpf_object__probe_name,
3319 bpf_object__probe_global_data,
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003320 bpf_object__probe_btf_func,
Alexei Starovoitov2d3eb672020-01-09 22:41:19 -08003321 bpf_object__probe_btf_func_global,
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07003322 bpf_object__probe_btf_datasec,
Andrii Nakryiko7fe74b42019-11-17 09:28:05 -08003323 bpf_object__probe_array_mmap,
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003324 };
3325 int i, ret;
3326
3327 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
3328 ret = probe_fn[i](obj);
3329 if (ret < 0)
Stanislav Fomichev15ea1642019-05-14 20:38:49 -07003330 pr_debug("Probe #%d failed with %d.\n", i, ret);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003331 }
3332
3333 return 0;
Stanislav Fomichev47eff612018-11-20 17:11:19 -08003334}
3335
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01003336static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
3337{
3338 struct bpf_map_info map_info = {};
3339 char msg[STRERR_BUFSIZE];
3340 __u32 map_info_len;
3341
3342 map_info_len = sizeof(map_info);
3343
3344 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
3345 pr_warn("failed to get map info for map FD %d: %s\n",
3346 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
3347 return false;
3348 }
3349
3350 return (map_info.type == map->def.type &&
3351 map_info.key_size == map->def.key_size &&
3352 map_info.value_size == map->def.value_size &&
3353 map_info.max_entries == map->def.max_entries &&
3354 map_info.map_flags == map->def.map_flags);
3355}
3356
3357static int
3358bpf_object__reuse_map(struct bpf_map *map)
3359{
3360 char *cp, errmsg[STRERR_BUFSIZE];
3361 int err, pin_fd;
3362
3363 pin_fd = bpf_obj_get(map->pin_path);
3364 if (pin_fd < 0) {
3365 err = -errno;
3366 if (err == -ENOENT) {
3367 pr_debug("found no pinned map to reuse at '%s'\n",
3368 map->pin_path);
3369 return 0;
3370 }
3371
3372 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
3373 pr_warn("couldn't retrieve pinned map '%s': %s\n",
3374 map->pin_path, cp);
3375 return err;
3376 }
3377
3378 if (!map_is_reuse_compat(map, pin_fd)) {
3379 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
3380 map->pin_path);
3381 close(pin_fd);
3382 return -EINVAL;
3383 }
3384
3385 err = bpf_map__reuse_fd(map, pin_fd);
3386 if (err) {
3387 close(pin_fd);
3388 return err;
3389 }
3390 map->pinned = true;
3391 pr_debug("reused pinned map at '%s'\n", map->pin_path);
3392
3393 return 0;
3394}
3395
Stanislav Fomichev47eff612018-11-20 17:11:19 -08003396static int
Daniel Borkmannd8599002019-04-09 23:20:13 +02003397bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
3398{
Andrii Nakryiko166750b2019-12-13 17:47:08 -08003399 enum libbpf_map_type map_type = map->libbpf_type;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003400 char *cp, errmsg[STRERR_BUFSIZE];
3401 int err, zero = 0;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003402
Andrii Nakryiko166750b2019-12-13 17:47:08 -08003403 /* kernel already zero-initializes .bss map. */
3404 if (map_type == LIBBPF_MAP_BSS)
Daniel Borkmannd8599002019-04-09 23:20:13 +02003405 return 0;
3406
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08003407 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
3408 if (err) {
3409 err = -errno;
3410 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
3411 pr_warn("Error setting initial map(%s) contents: %s\n",
3412 map->name, cp);
3413 return err;
3414 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02003415
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08003416 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
3417 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02003418 err = bpf_map_freeze(map->fd);
3419 if (err) {
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08003420 err = -errno;
3421 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08003422 pr_warn("Error freezing map(%s) as read-only: %s\n",
3423 map->name, cp);
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08003424 return err;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003425 }
3426 }
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08003427 return 0;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003428}
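
/* Example (a sketch): for a read-only global in the BPF program, e.g.
 *
 *	const volatile int debug_level = 1;	// ends up in .rodata
 *
 * the helper above writes the initial value into the .rodata map with
 * bpf_map_update_elem() and then bpf_map_freeze() makes the map read-only
 * from the syscall side, so user space can no longer modify it after load.
 */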
3429
3430static int
Wang Nan52d33522015-07-01 02:14:04 +00003431bpf_object__create_maps(struct bpf_object *obj)
3432{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003433 struct bpf_create_map_attr create_attr = {};
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07003434 int nr_cpus = 0;
Wang Nan52d33522015-07-01 02:14:04 +00003435 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003436 int err;
Wang Nan52d33522015-07-01 02:14:04 +00003437
Wang Nan9d759a92015-11-27 08:47:35 +00003438 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003439 struct bpf_map *map = &obj->maps[i];
3440 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003441 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003442 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00003443
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01003444 if (map->pin_path) {
3445 err = bpf_object__reuse_map(map);
3446 if (err) {
3447 pr_warn("error reusing pinned map %s\n",
3448 map->name);
3449 return err;
3450 }
3451 }
3452
Jakub Kicinski26736eb2018-07-10 14:43:06 -07003453 if (map->fd >= 0) {
3454 pr_debug("skip map create (preset) %s: fd=%d\n",
3455 map->name, map->fd);
3456 continue;
3457 }
3458
Stanislav Fomichev94cb3102018-11-20 17:11:20 -08003459 if (obj->caps.name)
3460 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07003461 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003462 create_attr.map_type = def->type;
3463 create_attr.map_flags = def->map_flags;
3464 create_attr.key_size = def->key_size;
3465 create_attr.value_size = def->value_size;
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07003466 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
3467 !def->max_entries) {
3468 if (!nr_cpus)
3469 nr_cpus = libbpf_num_possible_cpus();
3470 if (nr_cpus < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003471 pr_warn("failed to determine number of system CPUs: %d\n",
3472 nr_cpus);
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07003473 err = nr_cpus;
3474 goto err_out;
3475 }
3476 pr_debug("map '%s': setting size to %d\n",
3477 map->name, nr_cpus);
3478 create_attr.max_entries = nr_cpus;
3479 } else {
3480 create_attr.max_entries = def->max_entries;
3481 }
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07003482 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07003483 create_attr.btf_key_type_id = 0;
3484 create_attr.btf_value_type_id = 0;
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08003485 if (bpf_map_type__is_map_in_map(def->type) &&
3486 map->inner_map_fd >= 0)
3487 create_attr.inner_map_fd = map->inner_map_fd;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08003488 if (bpf_map__is_struct_ops(map))
3489 create_attr.btf_vmlinux_value_type_id =
3490 map->btf_vmlinux_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003491
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07003492 if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003493 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07003494 create_attr.btf_key_type_id = map->btf_key_type_id;
3495 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003496 }
3497
3498 *pfd = bpf_create_map_xattr(&create_attr);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07003499 if (*pfd < 0 && (create_attr.btf_key_type_id ||
3500 create_attr.btf_value_type_id)) {
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07003501 err = -errno;
3502 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08003503 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
3504 map->name, cp, err);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07003505 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07003506 create_attr.btf_key_type_id = 0;
3507 create_attr.btf_value_type_id = 0;
3508 map->btf_key_type_id = 0;
3509 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003510 *pfd = bpf_create_map_xattr(&create_attr);
3511 }
3512
Wang Nan52d33522015-07-01 02:14:04 +00003513 if (*pfd < 0) {
3514 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00003515
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07003516 err = -errno;
Daniel Borkmannd8599002019-04-09 23:20:13 +02003517err_out:
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07003518 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08003519 pr_warn("failed to create map (name: '%s'): %s(%d)\n",
3520 map->name, cp, err);
Toke Høiland-Jørgensendc3a2d22019-12-16 19:12:04 +01003521 pr_perm_msg(err);
Wang Nan52d33522015-07-01 02:14:04 +00003522 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00003523 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00003524 return err;
3525 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02003526
3527 if (bpf_map__is_internal(map)) {
3528 err = bpf_object__populate_internal_map(obj, map);
3529 if (err < 0) {
3530 zclose(*pfd);
3531 goto err_out;
3532 }
3533 }
3534
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01003535 if (map->pin_path && !map->pinned) {
3536 err = bpf_map__pin(map, NULL);
3537 if (err) {
3538 pr_warn("failed to auto-pin map name '%s' at '%s'\n",
3539 map->name, map->pin_path);
3540 return err;
3541 }
3542 }
3543
Andrii Nakryiko76e10222019-05-29 10:36:10 -07003544 pr_debug("created map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00003545 }
3546
Wang Nan52d33522015-07-01 02:14:04 +00003547 return 0;
3548}
3549
Wang Nan8a47a6c2015-07-01 02:14:05 +00003550static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003551check_btf_ext_reloc_err(struct bpf_program *prog, int err,
3552 void *btf_prog_info, const char *info_name)
3553{
3554 if (err != -ENOENT) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003555 pr_warn("Error in loading %s for sec %s.\n",
3556 info_name, prog->section_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003557 return err;
3558 }
3559
3560 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
3561
3562 if (btf_prog_info) {
3563 /*
		 * Some info has already been found, but there is a problem
		 * with the last btf_ext reloc, so we must error out.
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003566 */
Kefeng Wangbe180102019-10-21 13:55:32 +08003567 pr_warn("Error in relocating %s for sec %s.\n",
3568 info_name, prog->section_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003569 return err;
3570 }
3571
	/* There was a problem loading the very first info. Ignore the rest. */
Kefeng Wangbe180102019-10-21 13:55:32 +08003573 pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
3574 info_name, prog->section_name, info_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003575 return 0;
3576}
3577
3578static int
3579bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
3580 const char *section_name, __u32 insn_offset)
3581{
3582 int err;
3583
3584 if (!insn_offset || prog->func_info) {
3585 /*
3586 * !insn_offset => main program
3587 *
3588 * For sub prog, the main program's func_info has to
3589 * be loaded first (i.e. prog->func_info != NULL)
3590 */
3591 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
3592 section_name, insn_offset,
3593 &prog->func_info,
3594 &prog->func_info_cnt);
3595 if (err)
3596 return check_btf_ext_reloc_err(prog, err,
3597 prog->func_info,
3598 "bpf_func_info");
3599
3600 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
3601 }
3602
Martin KaFai Lau3d650142018-12-07 16:42:31 -08003603 if (!insn_offset || prog->line_info) {
3604 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
3605 section_name, insn_offset,
3606 &prog->line_info,
3607 &prog->line_info_cnt);
3608 if (err)
3609 return check_btf_ext_reloc_err(prog, err,
3610 prog->line_info,
3611 "bpf_line_info");
3612
3613 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
3614 }
3615
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003616 return 0;
3617}
3618
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003619#define BPF_CORE_SPEC_MAX_LEN 64
3620
3621/* represents BPF CO-RE field or array element accessor */
3622struct bpf_core_accessor {
3623 __u32 type_id; /* struct/union type or array element type */
3624 __u32 idx; /* field index or array index */
3625 const char *name; /* field name or NULL for array accessor */
3626};
3627
3628struct bpf_core_spec {
3629 const struct btf *btf;
3630 /* high-level spec: named fields and array indices only */
3631 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
3632 /* high-level spec length */
3633 int len;
3634 /* raw, low-level spec: 1-to-1 with accessor spec string */
3635 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
3636 /* raw spec length */
3637 int raw_len;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003638 /* field bit offset represented by spec */
3639 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003640};
3641
3642static bool str_is_empty(const char *s)
3643{
3644 return !s || !s[0];
3645}
3646
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08003647static bool is_flex_arr(const struct btf *btf,
3648 const struct bpf_core_accessor *acc,
3649 const struct btf_array *arr)
3650{
3651 const struct btf_type *t;
3652
	/* not a flexible array if it's not a struct member or has non-zero size */
3654 if (!acc->name || arr->nelems > 0)
3655 return false;
3656
3657 /* has to be the last member of enclosing struct */
3658 t = btf__type_by_id(btf, acc->type_id);
3659 return acc->idx == btf_vlen(t) - 1;
3660}
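
/* Example (a sketch): given
 *
 *	struct sample { int cnt; int arr[]; };
 *
 * an accessor for 'arr' has a name and nelems == 0, and 'arr' is the last
 * member of the struct, so is_flex_arr() returns true and indices past the
 * zero declared size are allowed for it.
 */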
3661
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003662/*
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003663 * Turn bpf_field_reloc into a low- and high-level spec representation,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003664 * validating correctness along the way, as well as calculating resulting
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003665 * field bit offset, specified by accessor string. Low-level spec captures
3666 * every single level of nestedness, including traversing anonymous
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003667 * struct/union members. High-level one only captures semantically meaningful
3668 * "turning points": named fields and array indicies.
3669 * E.g., for this case:
3670 *
3671 * struct sample {
3672 * int __unimportant;
3673 * struct {
3674 * int __1;
3675 * int __2;
3676 * int a[7];
3677 * };
3678 * };
3679 *
3680 * struct sample *s = ...;
3681 *
 * int *x = &s->a[3]; // access string = '0:1:2:3'
3683 *
3684 * Low-level spec has 1:1 mapping with each element of access string (it's
3685 * just a parsed access string representation): [0, 1, 2, 3].
3686 *
3687 * High-level spec will capture only 3 points:
 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
3689 * - field 'a' access (corresponds to '2' in low-level spec);
3690 * - array element #3 access (corresponds to '3' in low-level spec).
3691 *
3692 */
3693static int bpf_core_spec_parse(const struct btf *btf,
3694 __u32 type_id,
3695 const char *spec_str,
3696 struct bpf_core_spec *spec)
3697{
3698 int access_idx, parsed_len, i;
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08003699 struct bpf_core_accessor *acc;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003700 const struct btf_type *t;
3701 const char *name;
3702 __u32 id;
3703 __s64 sz;
3704
3705 if (str_is_empty(spec_str) || *spec_str == ':')
3706 return -EINVAL;
3707
3708 memset(spec, 0, sizeof(*spec));
3709 spec->btf = btf;
3710
3711 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
3712 while (*spec_str) {
3713 if (*spec_str == ':')
3714 ++spec_str;
3715 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
3716 return -EINVAL;
3717 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
3718 return -E2BIG;
3719 spec_str += parsed_len;
3720 spec->raw_spec[spec->raw_len++] = access_idx;
3721 }
3722
3723 if (spec->raw_len == 0)
3724 return -EINVAL;
3725
3726 /* first spec value is always reloc type array index */
3727 t = skip_mods_and_typedefs(btf, type_id, &id);
3728 if (!t)
3729 return -EINVAL;
3730
3731 access_idx = spec->raw_spec[0];
3732 spec->spec[0].type_id = id;
3733 spec->spec[0].idx = access_idx;
3734 spec->len++;
3735
3736 sz = btf__resolve_size(btf, id);
3737 if (sz < 0)
3738 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003739 spec->bit_offset = access_idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003740
3741 for (i = 1; i < spec->raw_len; i++) {
3742 t = skip_mods_and_typedefs(btf, id, &id);
3743 if (!t)
3744 return -EINVAL;
3745
3746 access_idx = spec->raw_spec[i];
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08003747 acc = &spec->spec[spec->len];
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003748
3749 if (btf_is_composite(t)) {
3750 const struct btf_member *m;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003751 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003752
3753 if (access_idx >= btf_vlen(t))
3754 return -EINVAL;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003755
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003756 bit_offset = btf_member_bit_offset(t, access_idx);
3757 spec->bit_offset += bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003758
3759 m = btf_members(t) + access_idx;
3760 if (m->name_off) {
3761 name = btf__name_by_offset(btf, m->name_off);
3762 if (str_is_empty(name))
3763 return -EINVAL;
3764
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08003765 acc->type_id = id;
3766 acc->idx = access_idx;
3767 acc->name = name;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003768 spec->len++;
3769 }
3770
3771 id = m->type;
3772 } else if (btf_is_array(t)) {
3773 const struct btf_array *a = btf_array(t);
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08003774 bool flex;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003775
3776 t = skip_mods_and_typedefs(btf, a->type, &id);
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08003777 if (!t)
3778 return -EINVAL;
3779
3780 flex = is_flex_arr(btf, acc - 1, a);
3781 if (!flex && access_idx >= a->nelems)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003782 return -EINVAL;
3783
3784 spec->spec[spec->len].type_id = id;
3785 spec->spec[spec->len].idx = access_idx;
3786 spec->len++;
3787
3788 sz = btf__resolve_size(btf, id);
3789 if (sz < 0)
3790 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003791 spec->bit_offset += access_idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003792 } else {
Kefeng Wangbe180102019-10-21 13:55:32 +08003793 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
3794 type_id, spec_str, i, id, btf_kind(t));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003795 return -EINVAL;
3796 }
3797 }
3798
3799 return 0;
3800}
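
/* Worked example (a sketch), continuing the 'struct sample' case documented
 * above: parsing spec_str "0:1:2:3" against 'struct sample' produces
 *
 *	raw_spec   = [0, 1, 2, 3], raw_len = 4
 *	spec       = [idx 0, field 'a', idx 3], len = 3
 *	bit_offset = 0 * sizeof(struct sample) * 8	// initial deref
 *		   + 4 * 8				// anon struct at byte 4
 *		   + 8 * 8				// 'a' at byte 8 inside it
 *		   + 3 * sizeof(int) * 8		// element #3
 *		   = 192, i.e. &s->a[3] is at byte offset 24
 */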
3801
3802static bool bpf_core_is_flavor_sep(const char *s)
3803{
3804 /* check X___Y name pattern, where X and Y are not underscores */
3805 return s[0] != '_' && /* X */
3806 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
3807 s[4] != '_'; /* Y */
3808}
3809
3810/* Given 'some_struct_name___with_flavor' return the length of a name prefix
3811 * before last triple underscore. Struct name part after last triple
3812 * underscore is ignored by BPF CO-RE relocation during relocation matching.
3813 */
3814static size_t bpf_core_essential_name_len(const char *name)
3815{
3816 size_t n = strlen(name);
3817 int i;
3818
3819 for (i = n - 5; i >= 0; i--) {
3820 if (bpf_core_is_flavor_sep(name + i))
3821 return i + 1;
3822 }
3823 return n;
3824}
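
/* Example (a sketch): bpf_core_essential_name_len("task_struct___flavored")
 * returns strlen("task_struct") == 11, so flavored and non-flavored
 * definitions of the same type compare equal during candidate matching,
 * while bpf_core_essential_name_len("task_struct") returns the full length.
 */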
3825
3826/* dynamically sized list of type IDs */
3827struct ids_vec {
3828 __u32 *data;
3829 int len;
3830};
3831
3832static void bpf_core_free_cands(struct ids_vec *cand_ids)
3833{
3834 free(cand_ids->data);
3835 free(cand_ids);
3836}
3837
3838static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
3839 __u32 local_type_id,
3840 const struct btf *targ_btf)
3841{
3842 size_t local_essent_len, targ_essent_len;
3843 const char *local_name, *targ_name;
3844 const struct btf_type *t;
3845 struct ids_vec *cand_ids;
3846 __u32 *new_ids;
3847 int i, err, n;
3848
3849 t = btf__type_by_id(local_btf, local_type_id);
3850 if (!t)
3851 return ERR_PTR(-EINVAL);
3852
3853 local_name = btf__name_by_offset(local_btf, t->name_off);
3854 if (str_is_empty(local_name))
3855 return ERR_PTR(-EINVAL);
3856 local_essent_len = bpf_core_essential_name_len(local_name);
3857
3858 cand_ids = calloc(1, sizeof(*cand_ids));
3859 if (!cand_ids)
3860 return ERR_PTR(-ENOMEM);
3861
3862 n = btf__get_nr_types(targ_btf);
3863 for (i = 1; i <= n; i++) {
3864 t = btf__type_by_id(targ_btf, i);
3865 targ_name = btf__name_by_offset(targ_btf, t->name_off);
3866 if (str_is_empty(targ_name))
3867 continue;
3868
3869 targ_essent_len = bpf_core_essential_name_len(targ_name);
3870 if (targ_essent_len != local_essent_len)
3871 continue;
3872
3873 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
3874 pr_debug("[%d] %s: found candidate [%d] %s\n",
3875 local_type_id, local_name, i, targ_name);
Andrii Nakryiko35b92112020-01-24 12:18:46 -08003876 new_ids = reallocarray(cand_ids->data,
3877 cand_ids->len + 1,
3878 sizeof(*cand_ids->data));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003879 if (!new_ids) {
3880 err = -ENOMEM;
3881 goto err_out;
3882 }
3883 cand_ids->data = new_ids;
3884 cand_ids->data[cand_ids->len++] = i;
3885 }
3886 }
3887 return cand_ids;
3888err_out:
3889 bpf_core_free_cands(cand_ids);
3890 return ERR_PTR(err);
3891}
3892
3893/* Check two types for compatibility, skipping const/volatile/restrict and
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003894 * typedefs, to ensure we are relocating compatible entities:
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003895 * - any two STRUCTs/UNIONs are compatible and can be mixed;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003896 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003897 * - any two PTRs are always compatible;
 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *   least one of the enums should be anonymous; sizes are ignored;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003901 * - for INT, size and signedness are ignored;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003902 * - for ARRAY, dimensionality is ignored, element types are checked for
3903 * compatibility recursively;
 * - everything else shouldn't ever be a target of relocation.
3905 * These rules are not set in stone and probably will be adjusted as we get
3906 * more experience with using BPF CO-RE relocations.
3907 */
3908static int bpf_core_fields_are_compat(const struct btf *local_btf,
3909 __u32 local_id,
3910 const struct btf *targ_btf,
3911 __u32 targ_id)
3912{
3913 const struct btf_type *local_type, *targ_type;
3914
3915recur:
3916 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
3917 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
3918 if (!local_type || !targ_type)
3919 return -EINVAL;
3920
3921 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
3922 return 1;
3923 if (btf_kind(local_type) != btf_kind(targ_type))
3924 return 0;
3925
3926 switch (btf_kind(local_type)) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003927 case BTF_KIND_PTR:
3928 return 1;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003929 case BTF_KIND_FWD:
3930 case BTF_KIND_ENUM: {
3931 const char *local_name, *targ_name;
3932 size_t local_len, targ_len;
3933
3934 local_name = btf__name_by_offset(local_btf,
3935 local_type->name_off);
3936 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
3937 local_len = bpf_core_essential_name_len(local_name);
3938 targ_len = bpf_core_essential_name_len(targ_name);
3939 /* one of them is anonymous or both w/ same flavor-less names */
3940 return local_len == 0 || targ_len == 0 ||
3941 (local_len == targ_len &&
3942 strncmp(local_name, targ_name, local_len) == 0);
3943 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003944 case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are compatible with each other by default
3947 */
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003948 return btf_int_offset(local_type) == 0 &&
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003949 btf_int_offset(targ_type) == 0;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003950 case BTF_KIND_ARRAY:
3951 local_id = btf_array(local_type)->type;
3952 targ_id = btf_array(targ_type)->type;
3953 goto recur;
3954 default:
Kefeng Wangbe180102019-10-21 13:55:32 +08003955 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
3956 btf_kind(local_type), local_id, targ_id);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003957 return 0;
3958 }
3959}
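
/* Example (a sketch) of the rules above: a local 'int a' member is
 * compatible with a target 'long a' (INT size/signedness ignored), a local
 * 'int a[7]' with a target 'int a[12]' (ARRAY dimensionality ignored,
 * element types compatible), but 'int a' vs 'int *a' is rejected because
 * the kinds (INT vs PTR) differ.
 */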
3960
3961/*
3962 * Given single high-level named field accessor in local type, find
3963 * corresponding high-level accessor for a target type. Along the way,
3964 * maintain low-level spec for target as well. Also keep updating target
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003965 * bit offset.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003966 *
3967 * Searching is performed through recursive exhaustive enumeration of all
3968 * fields of a struct/union. If there are any anonymous (embedded)
3969 * structs/unions, they are recursively searched as well. If field with
3970 * desired name is found, check compatibility between local and target types,
3971 * before returning result.
3972 *
3973 * 1 is returned, if field is found.
3974 * 0 is returned if no compatible field is found.
3975 * <0 is returned on error.
3976 */
3977static int bpf_core_match_member(const struct btf *local_btf,
3978 const struct bpf_core_accessor *local_acc,
3979 const struct btf *targ_btf,
3980 __u32 targ_id,
3981 struct bpf_core_spec *spec,
3982 __u32 *next_targ_id)
3983{
3984 const struct btf_type *local_type, *targ_type;
3985 const struct btf_member *local_member, *m;
3986 const char *local_name, *targ_name;
3987 __u32 local_id;
3988 int i, n, found;
3989
3990 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
3991 if (!targ_type)
3992 return -EINVAL;
3993 if (!btf_is_composite(targ_type))
3994 return 0;
3995
3996 local_id = local_acc->type_id;
3997 local_type = btf__type_by_id(local_btf, local_id);
3998 local_member = btf_members(local_type) + local_acc->idx;
3999 local_name = btf__name_by_offset(local_btf, local_member->name_off);
4000
4001 n = btf_vlen(targ_type);
4002 m = btf_members(targ_type);
4003 for (i = 0; i < n; i++, m++) {
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004004 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004005
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004006 bit_offset = btf_member_bit_offset(targ_type, i);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004007
4008 /* too deep struct/union/array nesting */
4009 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4010 return -E2BIG;
4011
4012 /* speculate this member will be the good one */
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004013 spec->bit_offset += bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004014 spec->raw_spec[spec->raw_len++] = i;
4015
4016 targ_name = btf__name_by_offset(targ_btf, m->name_off);
4017 if (str_is_empty(targ_name)) {
4018 /* embedded struct/union, we need to go deeper */
4019 found = bpf_core_match_member(local_btf, local_acc,
4020 targ_btf, m->type,
4021 spec, next_targ_id);
4022 if (found) /* either found or error */
4023 return found;
4024 } else if (strcmp(local_name, targ_name) == 0) {
4025 /* matching named field */
4026 struct bpf_core_accessor *targ_acc;
4027
4028 targ_acc = &spec->spec[spec->len++];
4029 targ_acc->type_id = targ_id;
4030 targ_acc->idx = i;
4031 targ_acc->name = targ_name;
4032
4033 *next_targ_id = m->type;
4034 found = bpf_core_fields_are_compat(local_btf,
4035 local_member->type,
4036 targ_btf, m->type);
4037 if (!found)
4038 spec->len--; /* pop accessor */
4039 return found;
4040 }
4041 /* member turned out not to be what we looked for */
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004042 spec->bit_offset -= bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004043 spec->raw_len--;
4044 }
4045
4046 return 0;
4047}
4048
4049/*
4050 * Try to match local spec to a target type and, if successful, produce full
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004051 * target spec (high-level, low-level + bit offset).
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004052 */
4053static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
4054 const struct btf *targ_btf, __u32 targ_id,
4055 struct bpf_core_spec *targ_spec)
4056{
4057 const struct btf_type *targ_type;
4058 const struct bpf_core_accessor *local_acc;
4059 struct bpf_core_accessor *targ_acc;
4060 int i, sz, matched;
4061
4062 memset(targ_spec, 0, sizeof(*targ_spec));
4063 targ_spec->btf = targ_btf;
4064
4065 local_acc = &local_spec->spec[0];
4066 targ_acc = &targ_spec->spec[0];
4067
4068 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
4069 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
4070 &targ_id);
4071 if (!targ_type)
4072 return -EINVAL;
4073
4074 if (local_acc->name) {
4075 matched = bpf_core_match_member(local_spec->btf,
4076 local_acc,
4077 targ_btf, targ_id,
4078 targ_spec, &targ_id);
4079 if (matched <= 0)
4080 return matched;
4081 } else {
4082 /* for i=0, targ_id is already treated as array element
4083 * type (because it's the original struct), for others
4084 * we should find array element type first
4085 */
4086 if (i > 0) {
4087 const struct btf_array *a;
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08004088 bool flex;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004089
4090 if (!btf_is_array(targ_type))
4091 return 0;
4092
4093 a = btf_array(targ_type);
Andrii Nakryiko1b484b32019-12-14 23:08:43 -08004094 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
4095 if (!flex && local_acc->idx >= a->nelems)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004096 return 0;
4097 if (!skip_mods_and_typedefs(targ_btf, a->type,
4098 &targ_id))
4099 return -EINVAL;
4100 }
4101
4102 /* too deep struct/union/array nesting */
4103 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4104 return -E2BIG;
4105
4106 targ_acc->type_id = targ_id;
4107 targ_acc->idx = local_acc->idx;
4108 targ_acc->name = NULL;
4109 targ_spec->len++;
4110 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
4111 targ_spec->raw_len++;
4112
4113 sz = btf__resolve_size(targ_btf, targ_id);
4114 if (sz < 0)
4115 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004116 targ_spec->bit_offset += local_acc->idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004117 }
4118 }
4119
4120 return 1;
4121}
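
/* Example (a sketch): for a local definition
 *
 *	struct sample { int a[7]; };		// accessor spec: [0, 'a', 3]
 *
 * and a hypothetical target definition
 *
 *	struct sample { char pad[16]; struct { int a[7]; }; };
 *
 * matching succeeds and the target spec's bit_offset becomes
 * (16 + 3 * sizeof(int)) * 8 = 224, computed purely from target BTF, which
 * is what lets the relocation compensate for layout differences.
 */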
4122
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004123static int bpf_core_calc_field_relo(const struct bpf_program *prog,
4124 const struct bpf_field_reloc *relo,
4125 const struct bpf_core_spec *spec,
4126 __u32 *val, bool *validate)
4127{
4128 const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
4129 const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
4130 __u32 byte_off, byte_sz, bit_off, bit_sz;
4131 const struct btf_member *m;
4132 const struct btf_type *mt;
4133 bool bitfield;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07004134 __s64 sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004135
4136 /* a[n] accessor needs special handling */
4137 if (!acc->name) {
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07004138 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
4139 *val = spec->bit_offset / 8;
4140 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
4141 sz = btf__resolve_size(spec->btf, acc->type_id);
4142 if (sz < 0)
4143 return -EINVAL;
4144 *val = sz;
4145 } else {
4146 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004147 bpf_program__title(prog, false),
4148 relo->kind, relo->insn_off / 8);
4149 return -EINVAL;
4150 }
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004151 if (validate)
4152 *validate = true;
4153 return 0;
4154 }
4155
4156 m = btf_members(t) + acc->idx;
4157 mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
4158 bit_off = spec->bit_offset;
4159 bit_sz = btf_member_bitfield_size(t, acc->idx);
4160
4161 bitfield = bit_sz > 0;
4162 if (bitfield) {
4163 byte_sz = mt->size;
4164 byte_off = bit_off / 8 / byte_sz * byte_sz;
4165 /* figure out smallest int size necessary for bitfield load */
4166 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
4167 if (byte_sz >= 8) {
4168 /* bitfield can't be read with 64-bit read */
4169 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
4170 bpf_program__title(prog, false),
4171 relo->kind, relo->insn_off / 8);
4172 return -E2BIG;
4173 }
4174 byte_sz *= 2;
4175 byte_off = bit_off / 8 / byte_sz * byte_sz;
4176 }
4177 } else {
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07004178 sz = btf__resolve_size(spec->btf, m->type);
4179 if (sz < 0)
4180 return -EINVAL;
4181 byte_sz = sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004182 byte_off = spec->bit_offset / 8;
4183 bit_sz = byte_sz * 8;
4184 }
4185
4186 /* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with the compiler, so turn off validation of expected
4188 * value, except for signedness
4189 */
4190 if (validate)
4191 *validate = !bitfield;
4192
4193 switch (relo->kind) {
4194 case BPF_FIELD_BYTE_OFFSET:
4195 *val = byte_off;
4196 break;
4197 case BPF_FIELD_BYTE_SIZE:
4198 *val = byte_sz;
4199 break;
4200 case BPF_FIELD_SIGNED:
4201 /* enums will be assumed unsigned */
4202 *val = btf_is_enum(mt) ||
4203 (btf_int_encoding(mt) & BTF_INT_SIGNED);
4204 if (validate)
4205 *validate = true; /* signedness is never ambiguous */
4206 break;
4207 case BPF_FIELD_LSHIFT_U64:
4208#if __BYTE_ORDER == __LITTLE_ENDIAN
4209 *val = 64 - (bit_off + bit_sz - byte_off * 8);
4210#else
4211 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
4212#endif
4213 break;
4214 case BPF_FIELD_RSHIFT_U64:
4215 *val = 64 - bit_sz;
4216 if (validate)
4217 *validate = true; /* right shift is never ambiguous */
4218 break;
4219 case BPF_FIELD_EXISTS:
4220 default:
4221 pr_warn("prog '%s': unknown relo %d at insn #%d\n",
4222 bpf_program__title(prog, false),
4223 relo->kind, relo->insn_off / 8);
4224 return -EINVAL;
4225 }
4226
4227 return 0;
4228}
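
/* Worked example (a sketch) of the bitfield math above: for a target layout
 *
 *	struct s { long pad; int x:5, y:10; };
 *
 * a relocation against 'y' sees bit_off = 69 and bit_sz = 10, so the field
 * is loaded as byte_sz = 4 bytes at byte_off = 8, and on little-endian
 * BPF_FIELD_LSHIFT_U64 = 64 - (69 + 10 - 64) = 49 and
 * BPF_FIELD_RSHIFT_U64 = 64 - 10 = 54, which together extract the bitfield
 * from the value loaded at byte_off.
 */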
4229
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004230/*
4231 * Patch relocatable BPF instruction.
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004232 *
4233 * Patched value is determined by relocation kind and target specification.
4234 * For field existence relocation target spec will be NULL if field is not
4235 * found.
4236 * Expected insn->imm value is determined using relocation kind and local
4237 * spec, and is checked before patching instruction. If actual insn->imm value
4238 * is wrong, bail out with error.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004239 *
4240 * Currently three kinds of BPF instructions are supported:
4241 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 * 3. rX = *(T *)(rY + <off>) and other LDX/ST/STX instructions, where
 *    the relocated field offset is patched into insn->off.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004243 */
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004244static int bpf_core_reloc_insn(struct bpf_program *prog,
4245 const struct bpf_field_reloc *relo,
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004246 int relo_idx,
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004247 const struct bpf_core_spec *local_spec,
4248 const struct bpf_core_spec *targ_spec)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004249{
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004250 __u32 orig_val, new_val;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004251 struct bpf_insn *insn;
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004252 bool validate = true;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004253 int insn_idx, err;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004254 __u8 class;
4255
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004256 if (relo->insn_off % sizeof(struct bpf_insn))
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004257 return -EINVAL;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004258 insn_idx = relo->insn_off / sizeof(struct bpf_insn);
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004259 insn = &prog->insns[insn_idx];
4260 class = BPF_CLASS(insn->code);
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004261
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004262 if (relo->kind == BPF_FIELD_EXISTS) {
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004263 orig_val = 1; /* can't generate EXISTS relo w/o local field */
4264 new_val = targ_spec ? 1 : 0;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004265 } else if (!targ_spec) {
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004266 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
4267 bpf_program__title(prog, false), relo_idx, insn_idx);
4268 insn->code = BPF_JMP | BPF_CALL;
4269 insn->dst_reg = 0;
4270 insn->src_reg = 0;
4271 insn->off = 0;
		/* if this instruction is reachable (not dead code),
		 * the verifier will complain with the following message:
4274 * invalid func unknown#195896080
4275 */
4276 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
4277 return 0;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004278 } else {
4279 err = bpf_core_calc_field_relo(prog, relo, local_spec,
4280 &orig_val, &validate);
4281 if (err)
4282 return err;
4283 err = bpf_core_calc_field_relo(prog, relo, targ_spec,
4284 &new_val, NULL);
4285 if (err)
4286 return err;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004287 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004288
Andrii Nakryiko8ab9da52019-12-23 10:03:05 -08004289 switch (class) {
4290 case BPF_ALU:
4291 case BPF_ALU64:
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004292 if (BPF_SRC(insn->code) != BPF_K)
4293 return -EINVAL;
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004294 if (validate && insn->imm != orig_val) {
4295 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
4296 bpf_program__title(prog, false), relo_idx,
4297 insn_idx, insn->imm, orig_val, new_val);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004298 return -EINVAL;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004299 }
4300 orig_val = insn->imm;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004301 insn->imm = new_val;
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004302 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
4303 bpf_program__title(prog, false), relo_idx, insn_idx,
4304 orig_val, new_val);
Andrii Nakryiko8ab9da52019-12-23 10:03:05 -08004305 break;
4306 case BPF_LDX:
4307 case BPF_ST:
4308 case BPF_STX:
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004309 if (validate && insn->off != orig_val) {
4310 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LD/LDX/ST/STX) value: got %u, exp %u -> %u\n",
4311 bpf_program__title(prog, false), relo_idx,
4312 insn_idx, insn->off, orig_val, new_val);
Andrii Nakryiko8ab9da52019-12-23 10:03:05 -08004313 return -EINVAL;
4314 }
4315 if (new_val > SHRT_MAX) {
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004316 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
4317 bpf_program__title(prog, false), relo_idx,
4318 insn_idx, new_val);
Andrii Nakryiko8ab9da52019-12-23 10:03:05 -08004319 return -ERANGE;
4320 }
4321 orig_val = insn->off;
4322 insn->off = new_val;
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004323 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
4324 bpf_program__title(prog, false), relo_idx, insn_idx,
4325 orig_val, new_val);
Andrii Nakryiko8ab9da52019-12-23 10:03:05 -08004326 break;
4327 default:
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004328 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
4329 bpf_program__title(prog, false), relo_idx,
Kefeng Wangbe180102019-10-21 13:55:32 +08004330 insn_idx, insn->code, insn->src_reg, insn->dst_reg,
4331 insn->off, insn->imm);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004332 return -EINVAL;
4333 }
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004334
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004335 return 0;
4336}
4337
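/*
 * For illustration only (not part of the original source): with hypothetical
 * offsets, a field access that local BTF places at offset 8 but the target
 * kernel BTF places at offset 16 would have its load patched from
 *
 *     r1 = *(u64 *)(r2 + 8);     // insn->off == 8  (local offset)
 * to
 *     r1 = *(u64 *)(r2 + 16);    // insn->off == 16 (target offset)
 *
 * while an ALU/ALU64 instruction gets its immediate (insn->imm) rewritten in
 * the same fashion.
 */
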
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004338/* Output spec definition in the format:
4339 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
4340 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
4341 */
4342static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
4343{
4344 const struct btf_type *t;
4345 const char *s;
4346 __u32 type_id;
4347 int i;
4348
4349 type_id = spec->spec[0].type_id;
4350 t = btf__type_by_id(spec->btf, type_id);
4351 s = btf__name_by_offset(spec->btf, t->name_off);
4352 libbpf_print(level, "[%u] %s + ", type_id, s);
4353
4354 for (i = 0; i < spec->raw_len; i++)
4355 libbpf_print(level, "%d%s", spec->raw_spec[i],
4356 i == spec->raw_len - 1 ? " => " : ":");
4357
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004358 libbpf_print(level, "%u.%u @ &x",
4359 spec->bit_offset / 8, spec->bit_offset % 8);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004360
4361 for (i = 0; i < spec->len; i++) {
4362 if (spec->spec[i].name)
4363 libbpf_print(level, ".%s", spec->spec[i].name);
4364 else
4365 libbpf_print(level, "[%u]", spec->spec[i].idx);
4366 }
4367
4368}
4369
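/*
 * For illustration only (not part of the original source): with a
 * hypothetical type ID, type name, and accessors, bpf_core_dump_spec() above
 * would emit something like:
 *
 *     [78] sk_buff + 0:1:2 => 4.0 @ &x.data[2]
 *
 * i.e. raw accessor indices, then byte.bit offset, then the C-style view.
 */
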
4370static size_t bpf_core_hash_fn(const void *key, void *ctx)
4371{
4372 return (size_t)key;
4373}
4374
4375static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
4376{
4377 return k1 == k2;
4378}
4379
4380static void *u32_as_hash_key(__u32 x)
4381{
4382 return (void *)(uintptr_t)x;
4383}
4384
4385/*
4386 * CO-RE relocate single instruction.
4387 *
4388 * The outline and important points of the algorithm:
4389 * 1. For given local type, find corresponding candidate target types.
4390 * Candidate type is a type with the same "essential" name, ignoring
4391 * everything after last triple underscore (___). E.g., `sample`,
4392 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
4393 * for each other. Names with triple underscore are referred to as
4394 * "flavors" and are useful, among other things, to allow to
4395 * specify/support incompatible variations of the same kernel struct, which
4396 * might differ between different kernel versions and/or build
4397 * configurations.
4398 *
4399 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
4400 * converter, when deduplicated BTF of a kernel still contains more than
4401 * one distinct type with the same name. In that case, ___2, ___3, etc.
4402 * are appended starting from the second name conflict. But struct flavors
4403 * are also useful when defined "locally", in a BPF program, to extract the
4404 * same data despite incompatible changes between different kernel
4405 * versions/configurations. For instance, to handle field renames between
4406 * kernel versions, one can use two flavors of the struct name with the
4407 * same common name and use conditional relocations to extract that field,
4408 * depending on target kernel version.
4409 * 2. For each candidate type, try to match local specification to this
4410 * candidate target type. Matching involves finding corresponding
4411 * high-level spec accessors, meaning that all named fields should match
4412 * and all array accesses should be within the actual bounds. Also,
4413 * types should be compatible (see bpf_core_fields_are_compat for details).
4414 * 3. It is supported and expected that there might be multiple flavors
4415 * matching the spec. As long as all the specs resolve to the same set of
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004416 * offsets across all candidates, there is no error. If there is any
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004417 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
4418 * imperfection of BTF deduplication, which can cause slight duplication of
4419 * the same BTF type, if some directly or indirectly referenced (by
4420 * pointer) type gets resolved to different actual types in different
4421 * object files. If such a situation occurs, deduplicated BTF will end up
4422 * with two (or more) structurally identical types, which differ only in
4423 * types they refer to through pointer. This should be OK in most cases and
4424 * is not an error.
4425 * 4. Candidate types search is performed by linearly scanning through all
4426 * types in target BTF. It is anticipated that this is overall more
4427 * efficient memory-wise and not significantly worse (if not better)
4428 * CPU-wise compared to prebuilding a map from all local type names to
4429 * a list of candidate type names. It's also sped up by caching the resolved
4430 * list of matching candidates for each local "root" type ID that has at
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004431 * least one bpf_field_reloc associated with it. This list is shared
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004432 * between multiple relocations for the same type ID and is updated as some
4433 * of the candidates are pruned due to structural incompatibility.
4434 */
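/*
 * For illustration only (not part of the original source): a hypothetical BPF
 * program could define two local flavors of the same kernel struct to cope
 * with a field rename across kernel versions, and pick the right one at
 * runtime (struct and field names below are made up):
 *
 *     struct some_struct___v1 { int old_field; };
 *     struct some_struct___v2 { int new_field; };
 *
 *     if (bpf_core_field_exists(((struct some_struct___v2 *)p)->new_field))
 *             val = BPF_CORE_READ((struct some_struct___v2 *)p, new_field);
 *     else
 *             val = BPF_CORE_READ((struct some_struct___v1 *)p, old_field);
 *
 * Both flavors share the essential name "some_struct", so both are matched
 * against candidates named "some_struct" in target BTF.
 */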
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004435static int bpf_core_reloc_field(struct bpf_program *prog,
4436 const struct bpf_field_reloc *relo,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004437 int relo_idx,
4438 const struct btf *local_btf,
4439 const struct btf *targ_btf,
4440 struct hashmap *cand_cache)
4441{
4442 const char *prog_name = bpf_program__title(prog, false);
4443 struct bpf_core_spec local_spec, cand_spec, targ_spec;
4444 const void *type_key = u32_as_hash_key(relo->type_id);
4445 const struct btf_type *local_type, *cand_type;
4446 const char *local_name, *cand_name;
4447 struct ids_vec *cand_ids;
4448 __u32 local_id, cand_id;
4449 const char *spec_str;
4450 int i, j, err;
4451
4452 local_id = relo->type_id;
4453 local_type = btf__type_by_id(local_btf, local_id);
4454 if (!local_type)
4455 return -EINVAL;
4456
4457 local_name = btf__name_by_offset(local_btf, local_type->name_off);
4458 if (str_is_empty(local_name))
4459 return -EINVAL;
4460
4461 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
4462 if (str_is_empty(spec_str))
4463 return -EINVAL;
4464
4465 err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
4466 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004467 pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
4468 prog_name, relo_idx, local_id, local_name, spec_str,
4469 err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004470 return -EINVAL;
4471 }
4472
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004473 pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
4474 relo->kind);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004475 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
4476 libbpf_print(LIBBPF_DEBUG, "\n");
4477
4478 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
4479 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
4480 if (IS_ERR(cand_ids)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004481 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
4482 prog_name, relo_idx, local_id, local_name,
4483 PTR_ERR(cand_ids));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004484 return PTR_ERR(cand_ids);
4485 }
4486 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
4487 if (err) {
4488 bpf_core_free_cands(cand_ids);
4489 return err;
4490 }
4491 }
4492
4493 for (i = 0, j = 0; i < cand_ids->len; i++) {
4494 cand_id = cand_ids->data[i];
4495 cand_type = btf__type_by_id(targ_btf, cand_id);
4496 cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
4497
4498 err = bpf_core_spec_match(&local_spec, targ_btf,
4499 cand_id, &cand_spec);
4500 pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
4501 prog_name, relo_idx, i, cand_name);
4502 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
4503 libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
4504 if (err < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004505 pr_warn("prog '%s': relo #%d: matching error: %d\n",
4506 prog_name, relo_idx, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004507 return err;
4508 }
4509 if (err == 0)
4510 continue;
4511
4512 if (j == 0) {
4513 targ_spec = cand_spec;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004514 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004515 /* if there are many candidates, they should all
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004516 * resolve to the same bit offset
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004517 */
Kefeng Wangbe180102019-10-21 13:55:32 +08004518 pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004519 prog_name, relo_idx, cand_spec.bit_offset,
4520 targ_spec.bit_offset);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004521 return -EINVAL;
4522 }
4523
4524 cand_ids->data[j++] = cand_spec.spec[0].type_id;
4525 }
4526
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004527 /*
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004528 * For a BPF_FIELD_EXISTS relo, or when the BPF program uses field
4529 * existence checks or kernel version/config checks, it's expected
4530 * that we might not find any candidates. In this case, if the field
4531 * wasn't found in any candidate, the list of candidates shouldn't
4532 * change at all; we'll just handle the relocation appropriately,
4533 * depending on relo's kind.
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004534 */
4535 if (j > 0)
4536 cand_ids->len = j;
4537
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004538 /*
4539 * If no candidates were found, it might be either a programmer error
4540 * or an expected case, depending on whether the instruction w/
4541 * relocation is guarded in some way that makes it unreachable (dead
4542 * code) if relocation can't be resolved. This is handled in
4543 * bpf_core_reloc_insn() uniformly by replacing that instruction with
4544 * BPF helper call insn (using invalid helper ID). If that instruction
4545 * is indeed unreachable, then it will be ignored and eliminated by
4546 * verifier. If it was an error, then verifier will complain and point
4547 * to a specific instruction number in its log.
4548 */
4549 if (j == 0)
4550 pr_debug("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
4551 prog_name, relo_idx, local_id, local_name, spec_str);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004552
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004553 /* bpf_core_reloc_insn should know how to handle missing targ_spec */
Andrii Nakryikod7a25272020-01-23 21:38:37 -08004554 err = bpf_core_reloc_insn(prog, relo, relo_idx, &local_spec,
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004555 j ? &targ_spec : NULL);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004556 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004557 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
4558 prog_name, relo_idx, relo->insn_off, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004559 return -EINVAL;
4560 }
4561
4562 return 0;
4563}
4564
4565static int
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004566bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004567{
4568 const struct btf_ext_info_sec *sec;
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004569 const struct bpf_field_reloc *rec;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004570 const struct btf_ext_info *seg;
4571 struct hashmap_entry *entry;
4572 struct hashmap *cand_cache = NULL;
4573 struct bpf_program *prog;
4574 struct btf *targ_btf;
4575 const char *sec_name;
4576 int i, err = 0;
4577
4578 if (targ_btf_path)
4579 targ_btf = btf__parse_elf(targ_btf_path, NULL);
4580 else
Martin KaFai Laufb2426a2020-01-15 15:00:31 -08004581 targ_btf = libbpf_find_kernel_btf();
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004582 if (IS_ERR(targ_btf)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004583 pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004584 return PTR_ERR(targ_btf);
4585 }
4586
4587 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
4588 if (IS_ERR(cand_cache)) {
4589 err = PTR_ERR(cand_cache);
4590 goto out;
4591 }
4592
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004593 seg = &obj->btf_ext->field_reloc_info;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004594 for_each_btf_ext_sec(seg, sec) {
4595 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
4596 if (str_is_empty(sec_name)) {
4597 err = -EINVAL;
4598 goto out;
4599 }
4600 prog = bpf_object__find_program_by_title(obj, sec_name);
4601 if (!prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004602 pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
4603 sec_name);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004604 err = -EINVAL;
4605 goto out;
4606 }
4607
4608 pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
4609 sec_name, sec->num_info);
4610
4611 for_each_btf_ext_rec(seg, sec, i, rec) {
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004612 err = bpf_core_reloc_field(prog, rec, i, obj->btf,
4613 targ_btf, cand_cache);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004614 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004615 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
4616 sec_name, i, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004617 goto out;
4618 }
4619 }
4620 }
4621
4622out:
4623 btf__free(targ_btf);
4624 if (!IS_ERR_OR_NULL(cand_cache)) {
4625 hashmap__for_each_entry(cand_cache, entry, i) {
4626 bpf_core_free_cands(entry->value);
4627 }
4628 hashmap__free(cand_cache);
4629 }
4630 return err;
4631}
4632
4633static int
4634bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
4635{
4636 int err = 0;
4637
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004638 if (obj->btf_ext->field_reloc_info.len)
4639 err = bpf_core_reloc_fields(obj, targ_btf_path);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004640
4641 return err;
4642}
4643
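/*
 * For illustration only (not part of the original source): instead of the
 * running kernel's BTF, a caller may point CO-RE relocation at a custom
 * target BTF via bpf_object_load_attr (the path below is hypothetical):
 *
 *     struct bpf_object_load_attr attr = {
 *             .obj = obj,
 *             .target_btf_path = "/tmp/vmlinux.btf",
 *     };
 *     err = bpf_object__load_xattr(&attr);
 */
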
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004644static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004645bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
4646 struct reloc_desc *relo)
4647{
4648 struct bpf_insn *insn, *new_insn;
4649 struct bpf_program *text;
4650 size_t new_cnt;
Yonghong Song2993e052018-11-19 15:29:16 -08004651 int err;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004652
Andrii Nakryiko9173cac2020-01-15 11:08:56 -08004653 if (prog->idx != obj->efile.text_shndx && prog->main_prog_cnt == 0) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004654 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
4655 if (!text) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004656 pr_warn("no .text section found yet relo into text exists\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004657 return -LIBBPF_ERRNO__RELOC;
4658 }
4659 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07004660 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004661 if (!new_insn) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004662 pr_warn("oom in prog realloc\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004663 return -ENOMEM;
4664 }
Andrii Nakryiko3dc5e052019-11-06 18:08:51 -08004665 prog->insns = new_insn;
Yonghong Song2993e052018-11-19 15:29:16 -08004666
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004667 if (obj->btf_ext) {
4668 err = bpf_program_reloc_btf_ext(prog, obj,
4669 text->section_name,
4670 prog->insns_cnt);
4671 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08004672 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08004673 }
4674
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004675 memcpy(new_insn + prog->insns_cnt, text->insns,
4676 text->insns_cnt * sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004677 prog->main_prog_cnt = prog->insns_cnt;
4678 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00004679 pr_debug("added %zd insn from %s to prog %s\n",
4680 text->insns_cnt, text->section_name,
4681 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004682 }
Andrii Nakryiko9173cac2020-01-15 11:08:56 -08004683
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004684 insn = &prog->insns[relo->insn_idx];
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004685 insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004686 return 0;
4687}
4688
4689static int
Wang Nan9d759a92015-11-27 08:47:35 +00004690bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00004691{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004692 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00004693
Yonghong Song2993e052018-11-19 15:29:16 -08004694 if (!prog)
4695 return 0;
4696
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004697 if (obj->btf_ext) {
4698 err = bpf_program_reloc_btf_ext(prog, obj,
4699 prog->section_name, 0);
4700 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08004701 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08004702 }
4703
4704 if (!prog->reloc_desc)
Wang Nan8a47a6c2015-07-01 02:14:05 +00004705 return 0;
4706
4707 for (i = 0; i < prog->nr_reloc; i++) {
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004708 struct reloc_desc *relo = &prog->reloc_desc[i];
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004709 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
Wang Nan8a47a6c2015-07-01 02:14:05 +00004710
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004711 if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
4712 pr_warn("relocation out of range: '%s'\n",
4713 prog->section_name);
4714 return -LIBBPF_ERRNO__RELOC;
4715 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00004716
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004717 switch (relo->type) {
4718 case RELO_LD64:
4719 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004720 insn[0].imm = obj->maps[relo->map_idx].fd;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004721 break;
4722 case RELO_DATA:
4723 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
4724 insn[1].imm = insn[0].imm + relo->sym_off;
4725 insn[0].imm = obj->maps[relo->map_idx].fd;
4726 break;
4727 case RELO_EXTERN:
4728 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08004729 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004730 insn[1].imm = relo->sym_off;
4731 break;
4732 case RELO_CALL:
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004733 err = bpf_program__reloc_text(prog, obj, relo);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004734 if (err)
4735 return err;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004736 break;
4737 default:
4738 pr_warn("relo #%d: bad relo type %d\n", i, relo->type);
4739 return -EINVAL;
Wang Nan8a47a6c2015-07-01 02:14:05 +00004740 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00004741 }
4742
4743 zfree(&prog->reloc_desc);
4744 prog->nr_reloc = 0;
4745 return 0;
4746}
4747
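/*
 * For illustration only (not part of the original source): a map reference in
 * BPF C code compiles into a two-instruction ld_imm64; the RELO_LD64 handling
 * above rewrites it into a "load map FD" pseudo instruction, shown here in a
 * loose disassembly-style notation with a hypothetical FD value of 5:
 *
 *     r1 = 0 ll              // before: imm == 0, src_reg == 0
 *     r1 = map[fd:5] ll      // after:  imm == 5, src_reg == BPF_PSEUDO_MAP_FD
 */
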
Wang Nan8a47a6c2015-07-01 02:14:05 +00004748static int
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004749bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
Wang Nan8a47a6c2015-07-01 02:14:05 +00004750{
4751 struct bpf_program *prog;
4752 size_t i;
4753 int err;
4754
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004755 if (obj->btf_ext) {
4756 err = bpf_object__relocate_core(obj, targ_btf_path);
4757 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004758 pr_warn("failed to perform CO-RE relocations: %d\n",
4759 err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004760 return err;
4761 }
4762 }
Andrii Nakryiko9173cac2020-01-15 11:08:56 -08004763 /* ensure .text is relocated first, as it's going to be copied as-is
4764 * later for sub-program calls
4765 */
Wang Nan8a47a6c2015-07-01 02:14:05 +00004766 for (i = 0; i < obj->nr_programs; i++) {
4767 prog = &obj->programs[i];
Andrii Nakryiko9173cac2020-01-15 11:08:56 -08004768 if (prog->idx != obj->efile.text_shndx)
4769 continue;
4770
4771 err = bpf_program__relocate(prog, obj);
4772 if (err) {
4773 pr_warn("failed to relocate '%s'\n", prog->section_name);
4774 return err;
4775 }
4776 break;
4777 }
4778 /* now relocate everything but .text, which by now is relocated
4779 * properly, so we can safely copy raw sub-program instructions as-is
4780 */
4781 for (i = 0; i < obj->nr_programs; i++) {
4782 prog = &obj->programs[i];
4783 if (prog->idx == obj->efile.text_shndx)
4784 continue;
Wang Nan8a47a6c2015-07-01 02:14:05 +00004785
Wang Nan9d759a92015-11-27 08:47:35 +00004786 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00004787 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004788 pr_warn("failed to relocate '%s'\n", prog->section_name);
Wang Nan8a47a6c2015-07-01 02:14:05 +00004789 return err;
4790 }
4791 }
4792 return 0;
4793}
4794
Martin KaFai Lau590a0082020-01-08 16:35:14 -08004795static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
4796 GElf_Shdr *shdr,
4797 Elf_Data *data);
4798
Wang Nan34090912015-07-01 02:14:02 +00004799static int bpf_object__collect_reloc(struct bpf_object *obj)
4800{
4801 int i, err;
4802
4803 if (!obj_elf_valid(obj)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004804 pr_warn("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00004805 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00004806 }
4807
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08004808 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
4809 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
4810 Elf_Data *data = obj->efile.reloc_sects[i].data;
Wang Nan34090912015-07-01 02:14:02 +00004811 int idx = shdr->sh_info;
4812 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00004813
4814 if (shdr->sh_type != SHT_REL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004815 pr_warn("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00004816 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00004817 }
4818
Martin KaFai Lau590a0082020-01-08 16:35:14 -08004819 if (idx == obj->efile.st_ops_shndx) {
4820 err = bpf_object__collect_struct_ops_map_reloc(obj,
4821 shdr,
4822 data);
4823 if (err)
4824 return err;
4825 continue;
4826 }
4827
Wang Nan34090912015-07-01 02:14:02 +00004828 prog = bpf_object__find_prog_by_idx(obj, idx);
4829 if (!prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004830 pr_warn("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00004831 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00004832 }
4833
Andrii Nakryiko399dc652019-05-29 10:36:11 -07004834 err = bpf_program__collect_reloc(prog, shdr, data, obj);
Wang Nan34090912015-07-01 02:14:02 +00004835 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00004836 return err;
Wang Nan34090912015-07-01 02:14:02 +00004837 }
4838 return 0;
4839}
4840
Wang Nan55cffde2015-07-01 02:14:07 +00004841static int
Yonghong Song2993e052018-11-19 15:29:16 -08004842load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004843 char *license, __u32 kern_version, int *pfd)
Wang Nan55cffde2015-07-01 02:14:07 +00004844{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004845 struct bpf_load_program_attr load_attr;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02004846 char *cp, errmsg[STRERR_BUFSIZE];
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004847 int log_buf_size = BPF_LOG_BUF_SIZE;
Wang Nan55cffde2015-07-01 02:14:07 +00004848 char *log_buf;
Andrii Nakryiko5d01ab72019-07-26 14:24:38 -07004849 int btf_fd, ret;
Wang Nan55cffde2015-07-01 02:14:07 +00004850
Andrii Nakryikofba01a02019-05-29 10:36:08 -07004851 if (!insns || !insns_cnt)
4852 return -EINVAL;
4853
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004854 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
Yonghong Song2993e052018-11-19 15:29:16 -08004855 load_attr.prog_type = prog->type;
4856 load_attr.expected_attach_type = prog->expected_attach_type;
Stanislav Fomichev5b32a232018-11-20 17:11:21 -08004857 if (prog->caps->name)
4858 load_attr.name = prog->name;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004859 load_attr.insns = insns;
4860 load_attr.insns_cnt = insns_cnt;
4861 load_attr.license = license;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08004862 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
4863 load_attr.attach_btf_id = prog->attach_btf_id;
Alexei Starovoitov2db6eab2020-01-20 16:53:47 -08004864 } else if (prog->type == BPF_PROG_TYPE_TRACING ||
4865 prog->type == BPF_PROG_TYPE_EXT) {
Alexei Starovoitove7bf94d2019-11-14 10:57:18 -08004866 load_attr.attach_prog_fd = prog->attach_prog_fd;
4867 load_attr.attach_btf_id = prog->attach_btf_id;
4868 } else {
4869 load_attr.kern_version = kern_version;
4870 load_attr.prog_ifindex = prog->prog_ifindex;
4871 }
Andrii Nakryiko3415ec62019-08-01 00:24:05 -07004872 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
4873 if (prog->obj->btf_ext)
4874 btf_fd = bpf_object__btf_fd(prog->obj);
4875 else
4876 btf_fd = -1;
Andrii Nakryiko5d01ab72019-07-26 14:24:38 -07004877 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
Yonghong Song2993e052018-11-19 15:29:16 -08004878 load_attr.func_info = prog->func_info;
4879 load_attr.func_info_rec_size = prog->func_info_rec_size;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004880 load_attr.func_info_cnt = prog->func_info_cnt;
Martin KaFai Lau3d650142018-12-07 16:42:31 -08004881 load_attr.line_info = prog->line_info;
4882 load_attr.line_info_rec_size = prog->line_info_rec_size;
4883 load_attr.line_info_cnt = prog->line_info_cnt;
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004884 load_attr.log_level = prog->log_level;
Jiong Wang04656192019-05-24 23:25:19 +01004885 load_attr.prog_flags = prog->prog_flags;
Wang Nan55cffde2015-07-01 02:14:07 +00004886
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004887retry_load:
4888 log_buf = malloc(log_buf_size);
Wang Nan55cffde2015-07-01 02:14:07 +00004889 if (!log_buf)
Kefeng Wangbe180102019-10-21 13:55:32 +08004890 pr_warn("failed to alloc log buffer for bpf loader, continuing without log\n");
Wang Nan55cffde2015-07-01 02:14:07 +00004891
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004892 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
Wang Nan55cffde2015-07-01 02:14:07 +00004893
4894 if (ret >= 0) {
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004895 if (load_attr.log_level)
4896 pr_debug("verifier log:\n%s", log_buf);
Wang Nan55cffde2015-07-01 02:14:07 +00004897 *pfd = ret;
4898 ret = 0;
4899 goto out;
4900 }
4901
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004902 if (errno == ENOSPC) {
4903 log_buf_size <<= 1;
4904 free(log_buf);
4905 goto retry_load;
4906 }
Toke Høiland-Jørgensen4f33ddb2019-11-09 21:37:29 +01004907 ret = -errno;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07004908 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08004909 pr_warn("load bpf program failed: %s\n", cp);
Toke Høiland-Jørgensendc3a2d22019-12-16 19:12:04 +01004910 pr_perm_msg(ret);
Wang Nan55cffde2015-07-01 02:14:07 +00004911
Wang Nan6371ca3b2015-11-06 13:49:37 +00004912 if (log_buf && log_buf[0] != '\0') {
4913 ret = -LIBBPF_ERRNO__VERIFY;
Kefeng Wangbe180102019-10-21 13:55:32 +08004914 pr_warn("-- BEGIN DUMP LOG ---\n");
4915 pr_warn("\n%s\n", log_buf);
4916 pr_warn("-- END LOG --\n");
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004917 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004918 pr_warn("Program too large (%zu insns), at most %d insns\n",
4919 load_attr.insns_cnt, BPF_MAXINSNS);
Wang Nan705fa212016-07-13 10:44:02 +00004920 ret = -LIBBPF_ERRNO__PROG2BIG;
Toke Høiland-Jørgensen4f33ddb2019-11-09 21:37:29 +01004921 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
Wang Nan705fa212016-07-13 10:44:02 +00004922 /* Wrong program type? */
Toke Høiland-Jørgensen4f33ddb2019-11-09 21:37:29 +01004923 int fd;
Wang Nan705fa212016-07-13 10:44:02 +00004924
Toke Høiland-Jørgensen4f33ddb2019-11-09 21:37:29 +01004925 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
4926 load_attr.expected_attach_type = 0;
4927 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
4928 if (fd >= 0) {
4929 close(fd);
4930 ret = -LIBBPF_ERRNO__PROGTYPE;
4931 goto out;
Wang Nan6371ca3b2015-11-06 13:49:37 +00004932 }
Wang Nan55cffde2015-07-01 02:14:07 +00004933 }
4934
4935out:
4936 free(log_buf);
4937 return ret;
4938}
4939
KP Singha6ed02c2020-01-17 22:28:25 +01004940static int libbpf_find_attach_btf_id(struct bpf_program *prog);
Andrii Nakryiko13acb502019-12-13 17:43:34 -08004941
4942int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
Wang Nan55cffde2015-07-01 02:14:07 +00004943{
Andrii Nakryiko13acb502019-12-13 17:43:34 -08004944 int err = 0, fd, i, btf_id;
4945
Eelco Chaudronff26ce52020-02-20 13:26:35 +00004946 if ((prog->type == BPF_PROG_TYPE_TRACING ||
4947 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
KP Singha6ed02c2020-01-17 22:28:25 +01004948 btf_id = libbpf_find_attach_btf_id(prog);
Andrii Nakryiko13acb502019-12-13 17:43:34 -08004949 if (btf_id <= 0)
4950 return btf_id;
4951 prog->attach_btf_id = btf_id;
4952 }
Wang Nan55cffde2015-07-01 02:14:07 +00004953
Wang Nanb5805632015-11-16 12:10:09 +00004954 if (prog->instances.nr < 0 || !prog->instances.fds) {
4955 if (prog->preprocessor) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004956 pr_warn("Internal error: can't load program '%s'\n",
4957 prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00004958 return -LIBBPF_ERRNO__INTERNAL;
4959 }
Wang Nan55cffde2015-07-01 02:14:07 +00004960
Wang Nanb5805632015-11-16 12:10:09 +00004961 prog->instances.fds = malloc(sizeof(int));
4962 if (!prog->instances.fds) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004963 pr_warn("Not enough memory for BPF fds\n");
Wang Nanb5805632015-11-16 12:10:09 +00004964 return -ENOMEM;
4965 }
4966 prog->instances.nr = 1;
4967 prog->instances.fds[0] = -1;
4968 }
4969
4970 if (!prog->preprocessor) {
4971 if (prog->instances.nr != 1) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004972 pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
4973 prog->section_name, prog->instances.nr);
Wang Nanb5805632015-11-16 12:10:09 +00004974 }
Yonghong Song2993e052018-11-19 15:29:16 -08004975 err = load_program(prog, prog->insns, prog->insns_cnt,
Andrii Nakryiko13acb502019-12-13 17:43:34 -08004976 license, kern_ver, &fd);
Wang Nanb5805632015-11-16 12:10:09 +00004977 if (!err)
4978 prog->instances.fds[0] = fd;
4979 goto out;
4980 }
4981
4982 for (i = 0; i < prog->instances.nr; i++) {
4983 struct bpf_prog_prep_result result;
4984 bpf_program_prep_t preprocessor = prog->preprocessor;
4985
Andrii Nakryiko1ad9cbb2019-02-13 10:25:53 -08004986 memset(&result, 0, sizeof(result));
Wang Nanb5805632015-11-16 12:10:09 +00004987 err = preprocessor(prog, i, prog->insns,
4988 prog->insns_cnt, &result);
4989 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004990 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
4991 i, prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00004992 goto out;
4993 }
4994
4995 if (!result.new_insn_ptr || !result.new_insn_cnt) {
4996 pr_debug("Skip loading the %dth instance of program '%s'\n",
4997 i, prog->section_name);
4998 prog->instances.fds[i] = -1;
4999 if (result.pfd)
5000 *result.pfd = -1;
5001 continue;
5002 }
5003
Yonghong Song2993e052018-11-19 15:29:16 -08005004 err = load_program(prog, result.new_insn_ptr,
Andrii Nakryiko13acb502019-12-13 17:43:34 -08005005 result.new_insn_cnt, license, kern_ver, &fd);
Wang Nanb5805632015-11-16 12:10:09 +00005006 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005007 pr_warn("Loading the %dth instance of program '%s' failed\n",
5008 i, prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00005009 goto out;
5010 }
5011
5012 if (result.pfd)
5013 *result.pfd = fd;
5014 prog->instances.fds[i] = fd;
5015 }
5016out:
Wang Nan55cffde2015-07-01 02:14:07 +00005017 if (err)
Kefeng Wangbe180102019-10-21 13:55:32 +08005018 pr_warn("failed to load program '%s'\n", prog->section_name);
Wang Nan55cffde2015-07-01 02:14:07 +00005019 zfree(&prog->insns);
5020 prog->insns_cnt = 0;
5021 return err;
5022}
5023
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005024static bool bpf_program__is_function_storage(const struct bpf_program *prog,
5025 const struct bpf_object *obj)
Jakub Kicinski9a94f272018-06-28 14:41:38 -07005026{
5027 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
5028}
5029
Wang Nan55cffde2015-07-01 02:14:07 +00005030static int
Quentin Monnet60276f92019-05-24 11:36:47 +01005031bpf_object__load_progs(struct bpf_object *obj, int log_level)
Wang Nan55cffde2015-07-01 02:14:07 +00005032{
5033 size_t i;
5034 int err;
5035
5036 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07005037 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005038 continue;
Quentin Monnet501b1252019-05-29 15:26:41 +01005039 obj->programs[i].log_level |= log_level;
Wang Nan55cffde2015-07-01 02:14:07 +00005040 err = bpf_program__load(&obj->programs[i],
5041 obj->license,
5042 obj->kern_version);
5043 if (err)
5044 return err;
5045 }
5046 return 0;
5047}
5048
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005049static struct bpf_object *
Andrii Nakryiko5e61f272019-10-04 15:40:34 -07005050__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08005051 const struct bpf_object_open_opts *opts)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005052{
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005053 const char *obj_name, *kconfig;
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005054 struct bpf_program *prog;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005055 struct bpf_object *obj;
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005056 char tmp_name[64];
Wang Nan6371ca3b2015-11-06 13:49:37 +00005057 int err;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005058
5059 if (elf_version(EV_CURRENT) == EV_NONE) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005060 pr_warn("failed to init libelf for %s\n",
5061 path ? : "(mem buf)");
Wang Nan6371ca3b2015-11-06 13:49:37 +00005062 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005063 }
5064
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005065 if (!OPTS_VALID(opts, bpf_object_open_opts))
5066 return ERR_PTR(-EINVAL);
5067
Andrii Nakryiko1aace102019-11-21 16:35:27 -08005068 obj_name = OPTS_GET(opts, object_name, NULL);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005069 if (obj_buf) {
5070 if (!obj_name) {
5071 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
5072 (unsigned long)obj_buf,
5073 (unsigned long)obj_buf_sz);
5074 obj_name = tmp_name;
5075 }
5076 path = obj_name;
5077 pr_debug("loading object '%s' from buffer\n", obj_name);
5078 }
5079
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005080 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
Wang Nan6371ca3b2015-11-06 13:49:37 +00005081 if (IS_ERR(obj))
5082 return obj;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005083
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005084 kconfig = OPTS_GET(opts, kconfig, NULL);
5085 if (kconfig) {
5086 obj->kconfig = strdup(kconfig);
5087 if (!obj->kconfig)
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005088 return ERR_PTR(-ENOMEM);
5089 }
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005090
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005091 err = bpf_object__elf_init(obj);
5092 err = err ? : bpf_object__check_endianness(obj);
5093 err = err ? : bpf_object__elf_collect(obj);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005094 err = err ? : bpf_object__collect_externs(obj);
5095 err = err ? : bpf_object__finalize_btf(obj);
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005096 err = err ? : bpf_object__init_maps(obj, opts);
5097 err = err ? : bpf_object__init_prog_names(obj);
5098 err = err ? : bpf_object__collect_reloc(obj);
5099 if (err)
5100 goto out;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005101 bpf_object__elf_finish(obj);
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005102
5103 bpf_object__for_each_program(prog, obj) {
5104 enum bpf_prog_type prog_type;
5105 enum bpf_attach_type attach_type;
5106
Martin KaFai Lau590a0082020-01-08 16:35:14 -08005107 if (prog->type != BPF_PROG_TYPE_UNSPEC)
5108 continue;
5109
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005110 err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
5111 &attach_type);
5112 if (err == -ESRCH)
5113 /* couldn't guess, but user might manually specify */
5114 continue;
5115 if (err)
5116 goto out;
5117
5118 bpf_program__set_type(prog, prog_type);
5119 bpf_program__set_expected_attach_type(prog, attach_type);
Alexei Starovoitov2db6eab2020-01-20 16:53:47 -08005120 if (prog_type == BPF_PROG_TYPE_TRACING ||
5121 prog_type == BPF_PROG_TYPE_EXT)
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005122 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005123 }
5124
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005125 return obj;
5126out:
5127 bpf_object__close(obj);
Wang Nan6371ca3b2015-11-06 13:49:37 +00005128 return ERR_PTR(err);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005129}
5130
Andrii Nakryiko5e61f272019-10-04 15:40:34 -07005131static struct bpf_object *
5132__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005133{
Andrii Nakryikoe00aca62019-10-22 10:21:00 -07005134 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005135 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
5136 );
5137
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005138 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07005139 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005140 return NULL;
5141
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07005142 pr_debug("loading %s\n", attr->file);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005143 return __bpf_object__open(attr->file, NULL, 0, &opts);
John Fastabendc034a172018-10-15 11:19:55 -07005144}
5145
5146struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
5147{
5148 return __bpf_object__open_xattr(attr, 0);
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07005149}
5150
5151struct bpf_object *bpf_object__open(const char *path)
5152{
5153 struct bpf_object_open_attr attr = {
5154 .file = path,
5155 .prog_type = BPF_PROG_TYPE_UNSPEC,
5156 };
5157
5158 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00005159}
5160
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005161struct bpf_object *
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08005162bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005163{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005164 if (!path)
5165 return ERR_PTR(-EINVAL);
5166
5167 pr_debug("loading %s\n", path);
5168
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005169 return __bpf_object__open(path, NULL, 0, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005170}
5171
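/*
 * For illustration only (not part of the original source): a typical
 * open/load flow using the options-based API (object file name and kconfig
 * override below are hypothetical):
 *
 *     DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *             .kconfig = "CONFIG_HZ=300",
 *     );
 *     struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 *
 *     if (libbpf_get_error(obj))
 *             return -1;
 *     if (bpf_object__load(obj))
 *             return -1;
 */
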
5172struct bpf_object *
5173bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08005174 const struct bpf_object_open_opts *opts)
Wang Nan6c956392015-07-01 02:13:54 +00005175{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005176 if (!obj_buf || obj_buf_sz == 0)
5177 return ERR_PTR(-EINVAL);
Wang Nan6c956392015-07-01 02:13:54 +00005178
Andrii Nakryiko291ee022019-10-15 11:28:46 -07005179 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005180}
5181
5182struct bpf_object *
5183bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
5184 const char *name)
5185{
Andrii Nakryikoe00aca62019-10-22 10:21:00 -07005186 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07005187 .object_name = name,
5188 /* wrong default, but backwards-compatible */
5189 .relaxed_maps = true,
5190 );
5191
5192 /* returning NULL is wrong, but backwards-compatible */
5193 if (!obj_buf || obj_buf_sz == 0)
5194 return NULL;
5195
5196 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005197}
5198
Wang Nan52d33522015-07-01 02:14:04 +00005199int bpf_object__unload(struct bpf_object *obj)
5200{
5201 size_t i;
5202
5203 if (!obj)
5204 return -EINVAL;
5205
Martin KaFai Lau590a0082020-01-08 16:35:14 -08005206 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan9d759a92015-11-27 08:47:35 +00005207 zclose(obj->maps[i].fd);
Martin KaFai Lau590a0082020-01-08 16:35:14 -08005208 if (obj->maps[i].st_ops)
5209 zfree(&obj->maps[i].st_ops->kern_vdata);
5210 }
Wang Nan52d33522015-07-01 02:14:04 +00005211
Wang Nan55cffde2015-07-01 02:14:07 +00005212 for (i = 0; i < obj->nr_programs; i++)
5213 bpf_program__unload(&obj->programs[i]);
5214
Wang Nan52d33522015-07-01 02:14:04 +00005215 return 0;
5216}
5217
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005218static int bpf_object__sanitize_maps(struct bpf_object *obj)
5219{
5220 struct bpf_map *m;
5221
5222 bpf_object__for_each_map(m, obj) {
5223 if (!bpf_map__is_internal(m))
5224 continue;
5225 if (!obj->caps.global_data) {
5226 pr_warn("kernel doesn't support global data\n");
5227 return -ENOTSUP;
5228 }
5229 if (!obj->caps.array_mmap)
5230 m->def.map_flags ^= BPF_F_MMAPABLE;
5231 }
5232
5233 return 0;
5234}
5235
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005236static int bpf_object__resolve_externs(struct bpf_object *obj,
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005237 const char *extra_kconfig)
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005238{
5239 bool need_config = false;
5240 struct extern_desc *ext;
5241 int err, i;
5242 void *data;
5243
5244 if (obj->nr_extern == 0)
5245 return 0;
5246
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08005247 data = obj->maps[obj->kconfig_map_idx].mmaped;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005248
5249 for (i = 0; i < obj->nr_extern; i++) {
5250 ext = &obj->externs[i];
5251
5252 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
5253 void *ext_val = data + ext->data_off;
5254 __u32 kver = get_kernel_version();
5255
5256 if (!kver) {
5257 pr_warn("failed to get kernel version\n");
5258 return -EINVAL;
5259 }
5260 err = set_ext_value_num(ext, ext_val, kver);
5261 if (err)
5262 return err;
5263 pr_debug("extern %s=0x%x\n", ext->name, kver);
5264 } else if (strncmp(ext->name, "CONFIG_", 7) == 0) {
5265 need_config = true;
5266 } else {
5267 pr_warn("unrecognized extern '%s'\n", ext->name);
5268 return -EINVAL;
5269 }
5270 }
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005271 if (need_config && extra_kconfig) {
5272 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, data);
5273 if (err)
5274 return -EINVAL;
5275 need_config = false;
5276 for (i = 0; i < obj->nr_extern; i++) {
5277 ext = &obj->externs[i];
5278 if (!ext->is_set) {
5279 need_config = true;
5280 break;
5281 }
5282 }
5283 }
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005284 if (need_config) {
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005285 err = bpf_object__read_kconfig_file(obj, data);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005286 if (err)
5287 return -EINVAL;
5288 }
5289 for (i = 0; i < obj->nr_extern; i++) {
5290 ext = &obj->externs[i];
5291
5292 if (!ext->is_set && !ext->is_weak) {
5293 pr_warn("extern %s (strong) not resolved\n", ext->name);
5294 return -ESRCH;
5295 } else if (!ext->is_set) {
5296 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
5297 ext->name);
5298 }
5299 }
5300
5301 return 0;
5302}
5303
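/*
 * For illustration only (not part of the original source): externs resolved
 * above are typically declared in BPF program code like this (CONFIG_HZ is
 * just an example; __kconfig comes from bpf_helpers.h):
 *
 *     extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *     extern unsigned int CONFIG_HZ __kconfig;
 *
 * Weak externs that remain unresolved default to zero, as handled above.
 */
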
Quentin Monnet60276f92019-05-24 11:36:47 +01005304int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
Wang Nan52d33522015-07-01 02:14:04 +00005305{
Quentin Monnet60276f92019-05-24 11:36:47 +01005306 struct bpf_object *obj;
Toke Høiland-Jørgensenec6d5f472019-11-09 21:37:27 +01005307 int err, i;
Wang Nan6371ca3b2015-11-06 13:49:37 +00005308
Quentin Monnet60276f92019-05-24 11:36:47 +01005309 if (!attr)
5310 return -EINVAL;
5311 obj = attr->obj;
Wang Nan52d33522015-07-01 02:14:04 +00005312 if (!obj)
5313 return -EINVAL;
5314
5315 if (obj->loaded) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005316 pr_warn("object should not be loaded twice\n");
Wang Nan52d33522015-07-01 02:14:04 +00005317 return -EINVAL;
5318 }
5319
5320 obj->loaded = true;
Wang Nan6371ca3b2015-11-06 13:49:37 +00005321
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005322 err = bpf_object__probe_caps(obj);
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005323 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005324 err = err ? : bpf_object__sanitize_and_load_btf(obj);
5325 err = err ? : bpf_object__sanitize_maps(obj);
KP Singha6ed02c2020-01-17 22:28:25 +01005326 err = err ? : bpf_object__load_vmlinux_btf(obj);
Martin KaFai Lau590a0082020-01-08 16:35:14 -08005327 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005328 err = err ? : bpf_object__create_maps(obj);
5329 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
5330 err = err ? : bpf_object__load_progs(obj, attr->log_level);
KP Singha6ed02c2020-01-17 22:28:25 +01005331
5332 btf__free(obj->btf_vmlinux);
5333 obj->btf_vmlinux = NULL;
5334
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08005335 if (err)
5336 goto out;
Wang Nan52d33522015-07-01 02:14:04 +00005337
5338 return 0;
5339out:
Toke Høiland-Jørgensenec6d5f472019-11-09 21:37:27 +01005340 /* unpin any maps that were auto-pinned during load */
5341 for (i = 0; i < obj->nr_maps; i++)
5342 if (obj->maps[i].pinned && !obj->maps[i].reused)
5343 bpf_map__unpin(&obj->maps[i], NULL);
5344
Wang Nan52d33522015-07-01 02:14:04 +00005345 bpf_object__unload(obj);
Kefeng Wangbe180102019-10-21 13:55:32 +08005346 pr_warn("failed to load object '%s'\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00005347 return err;
Wang Nan52d33522015-07-01 02:14:04 +00005348}
5349
Quentin Monnet60276f92019-05-24 11:36:47 +01005350int bpf_object__load(struct bpf_object *obj)
5351{
5352 struct bpf_object_load_attr attr = {
5353 .obj = obj,
5354 };
5355
5356 return bpf_object__load_xattr(&attr);
5357}
5358
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01005359static int make_parent_dir(const char *path)
5360{
5361 char *cp, errmsg[STRERR_BUFSIZE];
5362 char *dname, *dir;
5363 int err = 0;
5364
5365 dname = strdup(path);
5366 if (dname == NULL)
5367 return -ENOMEM;
5368
5369 dir = dirname(dname);
5370 if (mkdir(dir, 0700) && errno != EEXIST)
5371 err = -errno;
5372
5373 free(dname);
5374 if (err) {
5375 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5376 pr_warn("failed to mkdir %s: %s\n", path, cp);
5377 }
5378 return err;
5379}
5380
Joe Stringerf3675402017-01-26 13:19:56 -08005381static int check_path(const char *path)
5382{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02005383 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08005384 struct statfs st_fs;
5385 char *dname, *dir;
5386 int err = 0;
5387
5388 if (path == NULL)
5389 return -EINVAL;
5390
5391 dname = strdup(path);
5392 if (dname == NULL)
5393 return -ENOMEM;
5394
5395 dir = dirname(dname);
5396 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07005397 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08005398 pr_warn("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08005399 err = -errno;
5400 }
5401 free(dname);
5402
5403 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005404 pr_warn("specified path %s is not on BPF FS\n", path);
Joe Stringerf3675402017-01-26 13:19:56 -08005405 err = -EINVAL;
5406 }
5407
5408 return err;
5409}
5410
5411int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
5412 int instance)
5413{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02005414 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08005415 int err;
5416
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01005417 err = make_parent_dir(path);
5418 if (err)
5419 return err;
5420
Joe Stringerf3675402017-01-26 13:19:56 -08005421 err = check_path(path);
5422 if (err)
5423 return err;
5424
5425 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005426 pr_warn("invalid program pointer\n");
Joe Stringerf3675402017-01-26 13:19:56 -08005427 return -EINVAL;
5428 }
5429
5430 if (instance < 0 || instance >= prog->instances.nr) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005431 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
5432 instance, prog->section_name, prog->instances.nr);
Joe Stringerf3675402017-01-26 13:19:56 -08005433 return -EINVAL;
5434 }
5435
5436 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07005437 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08005438 pr_warn("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08005439 return -errno;
5440 }
5441 pr_debug("pinned program '%s'\n", path);
5442
5443 return 0;
5444}
5445
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005446int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
5447 int instance)
5448{
5449 int err;
5450
5451 err = check_path(path);
5452 if (err)
5453 return err;
5454
5455 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005456 pr_warn("invalid program pointer\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005457 return -EINVAL;
5458 }
5459
5460 if (instance < 0 || instance >= prog->instances.nr) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005461 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
5462 instance, prog->section_name, prog->instances.nr);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005463 return -EINVAL;
5464 }
5465
5466 err = unlink(path);
5467 if (err != 0)
5468 return -errno;
5469 pr_debug("unpinned program '%s'\n", path);
5470
5471 return 0;
5472}
5473
Joe Stringerf3675402017-01-26 13:19:56 -08005474int bpf_program__pin(struct bpf_program *prog, const char *path)
5475{
5476 int i, err;
5477
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01005478 err = make_parent_dir(path);
5479 if (err)
5480 return err;
5481
Joe Stringerf3675402017-01-26 13:19:56 -08005482 err = check_path(path);
5483 if (err)
5484 return err;
5485
5486 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005487 pr_warn("invalid program pointer\n");
Joe Stringerf3675402017-01-26 13:19:56 -08005488 return -EINVAL;
5489 }
5490
5491 if (prog->instances.nr <= 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005492 pr_warn("no instances of prog %s to pin\n",
Joe Stringerf3675402017-01-26 13:19:56 -08005493 prog->section_name);
5494 return -EINVAL;
5495 }
5496
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08005497 if (prog->instances.nr == 1) {
5498 /* don't create subdirs when pinning single instance */
5499 return bpf_program__pin_instance(prog, path, 0);
5500 }
5501
Joe Stringerf3675402017-01-26 13:19:56 -08005502 for (i = 0; i < prog->instances.nr; i++) {
5503 char buf[PATH_MAX];
5504 int len;
5505
5506 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005507 if (len < 0) {
5508 err = -EINVAL;
5509 goto err_unpin;
5510 } else if (len >= PATH_MAX) {
5511 err = -ENAMETOOLONG;
5512 goto err_unpin;
5513 }
5514
5515 err = bpf_program__pin_instance(prog, buf, i);
5516 if (err)
5517 goto err_unpin;
5518 }
5519
5520 return 0;
5521
5522err_unpin:
5523 for (i = i - 1; i >= 0; i--) {
5524 char buf[PATH_MAX];
5525 int len;
5526
5527 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
5528 if (len < 0)
5529 continue;
5530 else if (len >= PATH_MAX)
5531 continue;
5532
5533 bpf_program__unpin_instance(prog, buf, i);
5534 }
5535
5536 rmdir(path);
5537
5538 return err;
5539}
5540
5541int bpf_program__unpin(struct bpf_program *prog, const char *path)
5542{
5543 int i, err;
5544
5545 err = check_path(path);
5546 if (err)
5547 return err;
5548
5549 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005550 pr_warn("invalid program pointer\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005551 return -EINVAL;
5552 }
5553
5554 if (prog->instances.nr <= 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005555 pr_warn("no instances of prog %s to unpin\n",
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005556 prog->section_name);
5557 return -EINVAL;
5558 }
5559
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08005560 if (prog->instances.nr == 1) {
5561 /* don't create subdirs when pinning single instance */
5562 return bpf_program__unpin_instance(prog, path, 0);
5563 }
5564
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005565 for (i = 0; i < prog->instances.nr; i++) {
5566 char buf[PATH_MAX];
5567 int len;
5568
5569 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08005570 if (len < 0)
5571 return -EINVAL;
5572 else if (len >= PATH_MAX)
5573 return -ENAMETOOLONG;
5574
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005575 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08005576 if (err)
5577 return err;
5578 }
5579
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005580 err = rmdir(path);
5581 if (err)
5582 return -errno;
5583
Joe Stringerf3675402017-01-26 13:19:56 -08005584 return 0;
5585}
5586
Joe Stringerb6989f32017-01-26 13:19:57 -08005587int bpf_map__pin(struct bpf_map *map, const char *path)
5588{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02005589 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08005590 int err;
5591
Joe Stringerb6989f32017-01-26 13:19:57 -08005592 if (map == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005593 pr_warn("invalid map pointer\n");
Joe Stringerb6989f32017-01-26 13:19:57 -08005594 return -EINVAL;
5595 }
5596
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005597 if (map->pin_path) {
5598 if (path && strcmp(path, map->pin_path)) {
5599 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
5600 bpf_map__name(map), map->pin_path, path);
5601 return -EINVAL;
5602 } else if (map->pinned) {
5603 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
5604 bpf_map__name(map), map->pin_path);
5605 return 0;
5606 }
5607 } else {
5608 if (!path) {
5609 pr_warn("missing a path to pin map '%s' at\n",
5610 bpf_map__name(map));
5611 return -EINVAL;
5612 } else if (map->pinned) {
5613 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
5614 return -EEXIST;
5615 }
5616
5617 map->pin_path = strdup(path);
5618 if (!map->pin_path) {
5619 err = -errno;
5620 goto out_err;
5621 }
Joe Stringerb6989f32017-01-26 13:19:57 -08005622 }
5623
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01005624 err = make_parent_dir(map->pin_path);
5625 if (err)
5626 return err;
5627
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005628 err = check_path(map->pin_path);
5629 if (err)
5630 return err;
5631
5632 if (bpf_obj_pin(map->fd, map->pin_path)) {
5633 err = -errno;
5634 goto out_err;
5635 }
5636
5637 map->pinned = true;
5638 pr_debug("pinned map '%s'\n", map->pin_path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005639
Joe Stringerb6989f32017-01-26 13:19:57 -08005640 return 0;
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005641
5642out_err:
5643 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5644 pr_warn("failed to pin map: %s\n", cp);
5645 return err;
Joe Stringerb6989f32017-01-26 13:19:57 -08005646}
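
/*
 * Usage sketch (hypothetical caller, not part of libbpf): pin a single map
 * after the object has been loaded.  Map and path names are illustrative.
 * If the map already has a pin_path set, passing NULL reuses it, as handled
 * above.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (map)
 *		err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 */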
5647
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005648int bpf_map__unpin(struct bpf_map *map, const char *path)
Joe Stringerd5148d82017-01-26 13:19:58 -08005649{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005650 int err;
5651
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005652 if (map == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005653 pr_warn("invalid map pointer\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005654 return -EINVAL;
5655 }
5656
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005657 if (map->pin_path) {
5658 if (path && strcmp(path, map->pin_path)) {
5659 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
5660 bpf_map__name(map), map->pin_path, path);
5661 return -EINVAL;
5662 }
5663 path = map->pin_path;
5664 } else if (!path) {
5665 pr_warn("no path to unpin map '%s' from\n",
5666 bpf_map__name(map));
5667 return -EINVAL;
5668 }
5669
5670 err = check_path(path);
5671 if (err)
5672 return err;
5673
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005674 err = unlink(path);
5675 if (err != 0)
5676 return -errno;
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005677
5678 map->pinned = false;
5679 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005680
5681 return 0;
5682}
5683
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005684int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
5685{
5686 char *new = NULL;
5687
5688 if (path) {
5689 new = strdup(path);
5690 if (!new)
5691 return -errno;
5692 }
5693
5694 free(map->pin_path);
5695 map->pin_path = new;
5696 return 0;
5697}
5698
5699const char *bpf_map__get_pin_path(const struct bpf_map *map)
5700{
5701 return map->pin_path;
5702}
5703
5704bool bpf_map__is_pinned(const struct bpf_map *map)
5705{
5706 return map->pinned;
5707}
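
/*
 * Usage sketch (hypothetical caller, not part of libbpf): setting a pin path
 * before bpf_object__load() lets libbpf pin the map, or reuse an already
 * pinned map, at load time.  Names below are illustrative only.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (map)
 *		err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	...
 *	err = bpf_object__load(obj);
 */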
5708
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005709int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
5710{
Joe Stringerd5148d82017-01-26 13:19:58 -08005711 struct bpf_map *map;
5712 int err;
5713
5714 if (!obj)
5715 return -ENOENT;
5716
5717 if (!obj->loaded) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005718 pr_warn("object not yet loaded; load it first\n");
Joe Stringerd5148d82017-01-26 13:19:58 -08005719 return -ENOENT;
5720 }
5721
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08005722 bpf_object__for_each_map(map, obj) {
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005723 char *pin_path = NULL;
Joe Stringerd5148d82017-01-26 13:19:58 -08005724 char buf[PATH_MAX];
Joe Stringerd5148d82017-01-26 13:19:58 -08005725
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005726 if (path) {
5727 int len;
5728
5729 len = snprintf(buf, PATH_MAX, "%s/%s", path,
5730 bpf_map__name(map));
5731 if (len < 0) {
5732 err = -EINVAL;
5733 goto err_unpin_maps;
5734 } else if (len >= PATH_MAX) {
5735 err = -ENAMETOOLONG;
5736 goto err_unpin_maps;
5737 }
5738 pin_path = buf;
5739 } else if (!map->pin_path) {
5740 continue;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005741 }
5742
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005743 err = bpf_map__pin(map, pin_path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005744 if (err)
5745 goto err_unpin_maps;
5746 }
5747
5748 return 0;
5749
5750err_unpin_maps:
5751 while ((map = bpf_map__prev(map, obj))) {
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005752 if (!map->pin_path)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005753 continue;
5754
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005755 bpf_map__unpin(map, NULL);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005756 }
5757
5758 return err;
5759}
5760
5761int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
5762{
5763 struct bpf_map *map;
5764 int err;
5765
5766 if (!obj)
5767 return -ENOENT;
5768
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08005769 bpf_object__for_each_map(map, obj) {
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005770 char *pin_path = NULL;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005771 char buf[PATH_MAX];
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005772
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005773 if (path) {
5774 int len;
Joe Stringerd5148d82017-01-26 13:19:58 -08005775
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01005776 len = snprintf(buf, PATH_MAX, "%s/%s", path,
5777 bpf_map__name(map));
5778 if (len < 0)
5779 return -EINVAL;
5780 else if (len >= PATH_MAX)
5781 return -ENAMETOOLONG;
5782 pin_path = buf;
5783 } else if (!map->pin_path) {
5784 continue;
5785 }
5786
5787 err = bpf_map__unpin(map, pin_path);
Joe Stringerd5148d82017-01-26 13:19:58 -08005788 if (err)
5789 return err;
5790 }
5791
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005792 return 0;
5793}
5794
5795int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
5796{
5797 struct bpf_program *prog;
5798 int err;
5799
5800 if (!obj)
5801 return -ENOENT;
5802
5803 if (!obj->loaded) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005804 pr_warn("object not yet loaded; load it first\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005805 return -ENOENT;
5806 }
5807
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005808 bpf_object__for_each_program(prog, obj) {
5809 char buf[PATH_MAX];
5810 int len;
5811
5812 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08005813 prog->pin_name);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005814 if (len < 0) {
5815 err = -EINVAL;
5816 goto err_unpin_programs;
5817 } else if (len >= PATH_MAX) {
5818 err = -ENAMETOOLONG;
5819 goto err_unpin_programs;
5820 }
5821
5822 err = bpf_program__pin(prog, buf);
5823 if (err)
5824 goto err_unpin_programs;
5825 }
5826
5827 return 0;
5828
5829err_unpin_programs:
5830 while ((prog = bpf_program__prev(prog, obj))) {
5831 char buf[PATH_MAX];
5832 int len;
5833
5834 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08005835 prog->pin_name);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005836 if (len < 0)
5837 continue;
5838 else if (len >= PATH_MAX)
5839 continue;
5840
5841 bpf_program__unpin(prog, buf);
5842 }
5843
5844 return err;
5845}
5846
5847int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
5848{
5849 struct bpf_program *prog;
5850 int err;
5851
5852 if (!obj)
5853 return -ENOENT;
5854
Joe Stringerd5148d82017-01-26 13:19:58 -08005855 bpf_object__for_each_program(prog, obj) {
5856 char buf[PATH_MAX];
5857 int len;
5858
5859 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08005860 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08005861 if (len < 0)
5862 return -EINVAL;
5863 else if (len >= PATH_MAX)
5864 return -ENAMETOOLONG;
5865
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005866 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08005867 if (err)
5868 return err;
5869 }
5870
5871 return 0;
5872}
5873
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005874int bpf_object__pin(struct bpf_object *obj, const char *path)
5875{
5876 int err;
5877
5878 err = bpf_object__pin_maps(obj, path);
5879 if (err)
5880 return err;
5881
5882 err = bpf_object__pin_programs(obj, path);
5883 if (err) {
5884 bpf_object__unpin_maps(obj, path);
5885 return err;
5886 }
5887
5888 return 0;
5889}
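
/*
 * Usage sketch (hypothetical caller, not part of libbpf): pin all maps and
 * programs of a loaded object under one bpffs directory; on partial failure
 * the helpers above unpin whatever was already pinned.  Path is illustrative.
 *
 *	err = bpf_object__load(obj);
 *	if (!err)
 *		err = bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
 */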
5890
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005891void bpf_object__close(struct bpf_object *obj)
5892{
Wang Nana5b8bd42015-07-01 02:14:00 +00005893 size_t i;
5894
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005895 if (!obj)
5896 return;
5897
Wang Nan10931d22016-11-26 07:03:26 +00005898 if (obj->clear_priv)
5899 obj->clear_priv(obj, obj->priv);
5900
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005901 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00005902 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07005903 btf__free(obj->btf);
Yonghong Song2993e052018-11-19 15:29:16 -08005904 btf_ext__free(obj->btf_ext);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005905
Wang Nan9d759a92015-11-27 08:47:35 +00005906 for (i = 0; i < obj->nr_maps; i++) {
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08005907 struct bpf_map *map = &obj->maps[i];
5908
5909 if (map->clear_priv)
5910 map->clear_priv(map, map->priv);
5911 map->priv = NULL;
5912 map->clear_priv = NULL;
5913
5914 if (map->mmaped) {
5915 munmap(map->mmaped, bpf_map_mmap_sz(map));
5916 map->mmaped = NULL;
5917 }
5918
Martin KaFai Lau590a0082020-01-08 16:35:14 -08005919 if (map->st_ops) {
5920 zfree(&map->st_ops->data);
5921 zfree(&map->st_ops->progs);
5922 zfree(&map->st_ops->kern_func_off);
5923 zfree(&map->st_ops);
5924 }
5925
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08005926 zfree(&map->name);
5927 zfree(&map->pin_path);
Wang Nan9d759a92015-11-27 08:47:35 +00005928 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02005929
Andrii Nakryiko8601fd42019-12-18 16:28:35 -08005930 zfree(&obj->kconfig);
Andrii Nakryiko166750b2019-12-13 17:47:08 -08005931 zfree(&obj->externs);
5932 obj->nr_extern = 0;
5933
Wang Nan9d759a92015-11-27 08:47:35 +00005934 zfree(&obj->maps);
5935 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00005936
5937 if (obj->programs && obj->nr_programs) {
5938 for (i = 0; i < obj->nr_programs; i++)
5939 bpf_program__exit(&obj->programs[i]);
5940 }
5941 zfree(&obj->programs);
5942
Wang Nan9a208ef2015-07-01 02:14:10 +00005943 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00005944 free(obj);
5945}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00005946
Wang Nan9a208ef2015-07-01 02:14:10 +00005947struct bpf_object *
5948bpf_object__next(struct bpf_object *prev)
5949{
5950 struct bpf_object *next;
5951
5952 if (!prev)
5953 next = list_first_entry(&bpf_objects_list,
5954 struct bpf_object,
5955 list);
5956 else
5957 next = list_next_entry(prev, list);
5958
5959 /* Empty list is detected here, so no check is needed on entry. */
5960 if (&next->list == &bpf_objects_list)
5961 return NULL;
5962
5963 return next;
5964}
5965
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005966const char *bpf_object__name(const struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00005967{
Andrii Nakryikoc9e4c302019-10-04 15:40:36 -07005968 return obj ? obj->name : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00005969}
5970
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005971unsigned int bpf_object__kversion(const struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00005972{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03005973 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00005974}
5975
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005976struct btf *bpf_object__btf(const struct bpf_object *obj)
Andrey Ignatov789f6ba2019-02-14 15:01:43 -08005977{
5978 return obj ? obj->btf : NULL;
5979}
5980
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07005981int bpf_object__btf_fd(const struct bpf_object *obj)
5982{
5983 return obj->btf ? btf__fd(obj->btf) : -1;
5984}
5985
Wang Nan10931d22016-11-26 07:03:26 +00005986int bpf_object__set_priv(struct bpf_object *obj, void *priv,
5987 bpf_object_clear_priv_t clear_priv)
5988{
5989 if (obj->priv && obj->clear_priv)
5990 obj->clear_priv(obj, obj->priv);
5991
5992 obj->priv = priv;
5993 obj->clear_priv = clear_priv;
5994 return 0;
5995}
5996
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005997void *bpf_object__priv(const struct bpf_object *obj)
Wang Nan10931d22016-11-26 07:03:26 +00005998{
5999 return obj ? obj->priv : ERR_PTR(-EINVAL);
6000}
6001
Jakub Kicinskieac7d842018-06-28 14:41:39 -07006002static struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006003__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
6004 bool forward)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006005{
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08006006 size_t nr_programs = obj->nr_programs;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006007 ssize_t idx;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006008
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08006009 if (!nr_programs)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006010 return NULL;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006011
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08006012 if (!p)
6013 /* Iterate from the beginning */
6014 return forward ? &obj->programs[0] :
6015 &obj->programs[nr_programs - 1];
6016
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006017 if (p->obj != obj) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006018 pr_warn("error: program handler doesn't match object\n");
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006019 return NULL;
6020 }
6021
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08006022 idx = (p - obj->programs) + (forward ? 1 : -1);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006023 if (idx >= obj->nr_programs || idx < 0)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006024 return NULL;
6025 return &obj->programs[idx];
6026}
6027
Jakub Kicinskieac7d842018-06-28 14:41:39 -07006028struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006029bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
Jakub Kicinskieac7d842018-06-28 14:41:39 -07006030{
6031 struct bpf_program *prog = prev;
6032
6033 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08006034 prog = __bpf_program__iter(prog, obj, true);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006035 } while (prog && bpf_program__is_function_storage(prog, obj));
6036
6037 return prog;
6038}
6039
6040struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006041bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006042{
6043 struct bpf_program *prog = next;
6044
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006045 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08006046 prog = __bpf_program__iter(prog, obj, false);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07006047 } while (prog && bpf_program__is_function_storage(prog, obj));
6048
6049 return prog;
6050}
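
/*
 * Usage sketch (hypothetical caller, not part of libbpf): the iterators above
 * back the bpf_object__for_each_program() convenience macro (also used
 * elsewhere in this file), which skips function-storage sub-programs:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		bpf_program__set_ifindex(prog, ifindex);
 */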
6051
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03006052int bpf_program__set_priv(struct bpf_program *prog, void *priv,
6053 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006054{
6055 if (prog->priv && prog->clear_priv)
6056 prog->clear_priv(prog, prog->priv);
6057
6058 prog->priv = priv;
6059 prog->clear_priv = clear_priv;
6060 return 0;
6061}
6062
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006063void *bpf_program__priv(const struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006064{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03006065 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006066}
6067
Jakub Kicinski9aba3612018-06-28 14:41:37 -07006068void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
6069{
6070 prog->prog_ifindex = ifindex;
6071}
6072
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08006073const char *bpf_program__name(const struct bpf_program *prog)
6074{
6075 return prog->name;
6076}
6077
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006078const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006079{
6080 const char *title;
6081
6082 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09006083 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006084 title = strdup(title);
6085 if (!title) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006086 pr_warn("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00006087 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006088 }
6089 }
6090
6091 return title;
6092}
6093
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006094int bpf_program__fd(const struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006095{
Wang Nanb5805632015-11-16 12:10:09 +00006096 return bpf_program__nth_fd(prog, 0);
6097}
6098
Toke Høiland-Jørgensen1a734ef2019-11-09 21:37:32 +01006099size_t bpf_program__size(const struct bpf_program *prog)
6100{
6101 return prog->insns_cnt * sizeof(struct bpf_insn);
6102}
6103
Wang Nanb5805632015-11-16 12:10:09 +00006104int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
6105 bpf_program_prep_t prep)
6106{
6107 int *instances_fds;
6108
6109 if (nr_instances <= 0 || !prep)
6110 return -EINVAL;
6111
6112 if (prog->instances.nr > 0 || prog->instances.fds) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006113 pr_warn("Can't set pre-processor after loading\n");
Wang Nanb5805632015-11-16 12:10:09 +00006114 return -EINVAL;
6115 }
6116
6117 instances_fds = malloc(sizeof(int) * nr_instances);
6118 if (!instances_fds) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006119 pr_warn("failed to allocate memory for fds\n");
Wang Nanb5805632015-11-16 12:10:09 +00006120 return -ENOMEM;
6121 }
6122
6123 /* fill all fds with -1 */
6124 memset(instances_fds, -1, sizeof(int) * nr_instances);
6125
6126 prog->instances.nr = nr_instances;
6127 prog->instances.fds = instances_fds;
6128 prog->preprocessor = prep;
6129 return 0;
6130}
6131
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006132int bpf_program__nth_fd(const struct bpf_program *prog, int n)
Wang Nanb5805632015-11-16 12:10:09 +00006133{
6134 int fd;
6135
Jakub Kicinski1e960042018-07-26 14:32:18 -07006136 if (!prog)
6137 return -EINVAL;
6138
Wang Nanb5805632015-11-16 12:10:09 +00006139 if (n >= prog->instances.nr || n < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006140 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
6141 n, prog->section_name, prog->instances.nr);
Wang Nanb5805632015-11-16 12:10:09 +00006142 return -EINVAL;
6143 }
6144
6145 fd = prog->instances.fds[n];
6146 if (fd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006147 pr_warn("%dth instance of program '%s' is invalid\n",
6148 n, prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00006149 return -ENOENT;
6150 }
6151
6152 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00006153}
Wang Nan9d759a92015-11-27 08:47:35 +00006154
Andrii Nakryikof1eead92019-10-20 20:38:57 -07006155enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
6156{
6157 return prog->type;
6158}
6159
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07006160void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00006161{
6162 prog->type = type;
6163}
6164
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006165static bool bpf_program__is_type(const struct bpf_program *prog,
Wang Nan5f44e4c82016-07-13 10:44:01 +00006166 enum bpf_prog_type type)
6167{
6168 return prog ? (prog->type == type) : false;
6169}
6170
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006171#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
6172int bpf_program__set_##NAME(struct bpf_program *prog) \
6173{ \
6174 if (!prog) \
6175 return -EINVAL; \
6176 bpf_program__set_type(prog, TYPE); \
6177 return 0; \
6178} \
6179 \
6180bool bpf_program__is_##NAME(const struct bpf_program *prog) \
6181{ \
6182 return bpf_program__is_type(prog, TYPE); \
6183} \
Wang Nan5f44e4c82016-07-13 10:44:01 +00006184
Joe Stringer7803ba72017-01-22 17:11:24 -08006185BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
Joe Stringered794072017-01-22 17:11:23 -08006186BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
Joe Stringer7803ba72017-01-22 17:11:24 -08006187BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
6188BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
Joe Stringered794072017-01-22 17:11:23 -08006189BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
Andrey Ignatove14c93fd2018-04-17 10:28:46 -07006190BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
Joe Stringer7803ba72017-01-22 17:11:24 -08006191BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
6192BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006193BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
Martin KaFai Lau590a0082020-01-08 16:35:14 -08006194BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
Alexei Starovoitov2db6eab2020-01-20 16:53:47 -08006195BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
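
/*
 * Usage sketch (hypothetical caller, not part of libbpf): the macro above
 * generates pairs such as bpf_program__set_xdp()/bpf_program__is_xdp(),
 * which can override the type guessed from the ELF section name:
 *
 *	if (!bpf_program__is_xdp(prog))
 *		err = bpf_program__set_xdp(prog);
 */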
Wang Nan5f44e4c82016-07-13 10:44:01 +00006196
Andrii Nakryikof1eead92019-10-20 20:38:57 -07006197enum bpf_attach_type
6198bpf_program__get_expected_attach_type(struct bpf_program *prog)
6199{
6200 return prog->expected_attach_type;
6201}
6202
John Fastabend16962b22018-04-23 14:30:38 -07006203void bpf_program__set_expected_attach_type(struct bpf_program *prog,
6204 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006205{
6206 prog->expected_attach_type = type;
6207}
6208
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07006209#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
6210 { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006211
Andrey Ignatov956b6202018-09-26 15:24:53 -07006212/* Programs that can NOT be attached. */
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07006213#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006214
Andrey Ignatov956b6202018-09-26 15:24:53 -07006215/* Programs that can be attached. */
6216#define BPF_APROG_SEC(string, ptype, atype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07006217 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
Andrey Ignatov81efee72018-04-17 10:28:45 -07006218
Andrey Ignatov956b6202018-09-26 15:24:53 -07006219/* Programs that must specify expected attach type at load time. */
6220#define BPF_EAPROG_SEC(string, ptype, eatype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07006221 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
6222
6223/* Programs that use BTF to identify attach point */
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006224#define BPF_PROG_BTF(string, ptype, eatype) \
6225 BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
Andrey Ignatov956b6202018-09-26 15:24:53 -07006226
6227/* Programs that can be attached but attach type can't be identified by section
6228 * name. Kept for backward compatibility.
6229 */
6230#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07006231
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006232#define SEC_DEF(sec_pfx, ptype, ...) { \
6233 .sec = sec_pfx, \
6234 .len = sizeof(sec_pfx) - 1, \
6235 .prog_type = BPF_PROG_TYPE_##ptype, \
6236 __VA_ARGS__ \
6237}
6238
6239struct bpf_sec_def;
6240
6241typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
6242 struct bpf_program *prog);
6243
6244static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
6245 struct bpf_program *prog);
6246static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
6247 struct bpf_program *prog);
6248static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
6249 struct bpf_program *prog);
6250static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
6251 struct bpf_program *prog);
6252
6253struct bpf_sec_def {
Roman Gushchin583c9002017-12-13 15:18:51 +00006254 const char *sec;
6255 size_t len;
6256 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006257 enum bpf_attach_type expected_attach_type;
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07006258 bool is_attachable;
6259 bool is_attach_btf;
Andrey Ignatov956b6202018-09-26 15:24:53 -07006260 enum bpf_attach_type attach_type;
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006261 attach_fn_t attach_fn;
6262};
6263
6264static const struct bpf_sec_def section_defs[] = {
Andrey Ignatov956b6202018-09-26 15:24:53 -07006265 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
Jakub Sitnicki67d69cc2019-12-12 11:22:50 +01006266 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006267 SEC_DEF("kprobe/", KPROBE,
6268 .attach_fn = attach_kprobe),
Andrii Nakryiko32dff6d2019-10-20 20:38:58 -07006269 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006270 SEC_DEF("kretprobe/", KPROBE,
6271 .attach_fn = attach_kprobe),
Andrii Nakryiko32dff6d2019-10-20 20:38:58 -07006272 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
Andrey Ignatov956b6202018-09-26 15:24:53 -07006273 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
6274 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006275 SEC_DEF("tracepoint/", TRACEPOINT,
6276 .attach_fn = attach_tp),
6277 SEC_DEF("tp/", TRACEPOINT,
6278 .attach_fn = attach_tp),
6279 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
6280 .attach_fn = attach_raw_tp),
6281 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
6282 .attach_fn = attach_raw_tp),
6283 SEC_DEF("tp_btf/", TRACING,
6284 .expected_attach_type = BPF_TRACE_RAW_TP,
6285 .is_attach_btf = true,
6286 .attach_fn = attach_trace),
6287 SEC_DEF("fentry/", TRACING,
6288 .expected_attach_type = BPF_TRACE_FENTRY,
6289 .is_attach_btf = true,
6290 .attach_fn = attach_trace),
KP Singhaca228c2020-03-04 20:18:51 +01006291 SEC_DEF("fmod_ret/", TRACING,
6292 .expected_attach_type = BPF_MODIFY_RETURN,
6293 .is_attach_btf = true,
6294 .attach_fn = attach_trace),
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006295 SEC_DEF("fexit/", TRACING,
6296 .expected_attach_type = BPF_TRACE_FEXIT,
6297 .is_attach_btf = true,
6298 .attach_fn = attach_trace),
Alexei Starovoitov2db6eab2020-01-20 16:53:47 -08006299 SEC_DEF("freplace/", EXT,
6300 .is_attach_btf = true,
6301 .attach_fn = attach_trace),
Andrey Ignatov956b6202018-09-26 15:24:53 -07006302 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
6303 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
6304 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
6305 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
6306 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
6307 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
Andrey Ignatovbafa7af2018-09-26 15:24:54 -07006308 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
6309 BPF_CGROUP_INET_INGRESS),
6310 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
6311 BPF_CGROUP_INET_EGRESS),
Andrey Ignatov956b6202018-09-26 15:24:53 -07006312 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
6313 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
6314 BPF_CGROUP_INET_SOCK_CREATE),
6315 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
6316 BPF_CGROUP_INET4_POST_BIND),
6317 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
6318 BPF_CGROUP_INET6_POST_BIND),
6319 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
6320 BPF_CGROUP_DEVICE),
6321 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
6322 BPF_CGROUP_SOCK_OPS),
Andrey Ignatovc6f68512018-09-26 15:24:55 -07006323 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
6324 BPF_SK_SKB_STREAM_PARSER),
6325 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
6326 BPF_SK_SKB_STREAM_VERDICT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07006327 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
6328 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
6329 BPF_SK_MSG_VERDICT),
6330 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
6331 BPF_LIRC_MODE2),
6332 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
6333 BPF_FLOW_DISSECTOR),
6334 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6335 BPF_CGROUP_INET4_BIND),
6336 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6337 BPF_CGROUP_INET6_BIND),
6338 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6339 BPF_CGROUP_INET4_CONNECT),
6340 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6341 BPF_CGROUP_INET6_CONNECT),
6342 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6343 BPF_CGROUP_UDP4_SENDMSG),
6344 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6345 BPF_CGROUP_UDP6_SENDMSG),
Daniel Borkmann9bb59ac2019-06-07 01:48:59 +02006346 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6347 BPF_CGROUP_UDP4_RECVMSG),
6348 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
6349 BPF_CGROUP_UDP6_RECVMSG),
Andrey Ignatov063cc9f2019-03-08 09:15:26 -08006350 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
6351 BPF_CGROUP_SYSCTL),
Stanislav Fomichev4cdbfb52019-06-27 13:38:49 -07006352 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
6353 BPF_CGROUP_GETSOCKOPT),
6354 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
6355 BPF_CGROUP_SETSOCKOPT),
Martin KaFai Lau590a0082020-01-08 16:35:14 -08006356 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
Roman Gushchin583c9002017-12-13 15:18:51 +00006357};
Roman Gushchin583c9002017-12-13 15:18:51 +00006358
Andrey Ignatov956b6202018-09-26 15:24:53 -07006359#undef BPF_PROG_SEC_IMPL
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006360#undef BPF_PROG_SEC
Andrey Ignatov956b6202018-09-26 15:24:53 -07006361#undef BPF_APROG_SEC
6362#undef BPF_EAPROG_SEC
6363#undef BPF_APROG_COMPAT
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006364#undef SEC_DEF
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006365
Taeung Songc76e4c22019-01-21 22:06:38 +09006366#define MAX_TYPE_NAME_SIZE 32
6367
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006368static const struct bpf_sec_def *find_sec_def(const char *sec_name)
6369{
6370 int i, n = ARRAY_SIZE(section_defs);
6371
6372 for (i = 0; i < n; i++) {
6373 if (strncmp(sec_name,
6374 section_defs[i].sec, section_defs[i].len))
6375 continue;
6376 return &section_defs[i];
6377 }
6378 return NULL;
6379}
6380
Taeung Songc76e4c22019-01-21 22:06:38 +09006381static char *libbpf_get_type_names(bool attach_type)
6382{
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006383 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
Taeung Songc76e4c22019-01-21 22:06:38 +09006384 char *buf;
6385
6386 buf = malloc(len);
6387 if (!buf)
6388 return NULL;
6389
6390 buf[0] = '\0';
6391 /* Forge string buf with all available names */
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006392 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
6393 if (attach_type && !section_defs[i].is_attachable)
Taeung Songc76e4c22019-01-21 22:06:38 +09006394 continue;
6395
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006396 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
Taeung Songc76e4c22019-01-21 22:06:38 +09006397 free(buf);
6398 return NULL;
6399 }
6400 strcat(buf, " ");
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006401 strcat(buf, section_defs[i].sec);
Taeung Songc76e4c22019-01-21 22:06:38 +09006402 }
6403
6404 return buf;
6405}
6406
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07006407int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
6408 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00006409{
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006410 const struct bpf_sec_def *sec_def;
Taeung Songc76e4c22019-01-21 22:06:38 +09006411 char *type_names;
Roman Gushchin583c9002017-12-13 15:18:51 +00006412
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07006413 if (!name)
6414 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00006415
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006416 sec_def = find_sec_def(name);
6417 if (sec_def) {
6418 *prog_type = sec_def->prog_type;
6419 *expected_attach_type = sec_def->expected_attach_type;
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07006420 return 0;
6421 }
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006422
Andrii Nakryiko4a3d6c62019-12-17 15:42:28 -08006423 pr_debug("failed to guess program type from ELF section '%s'\n", name);
Taeung Songc76e4c22019-01-21 22:06:38 +09006424 type_names = libbpf_get_type_names(false);
6425 if (type_names != NULL) {
Andrii Nakryiko3f519352019-12-13 17:43:35 -08006426 pr_debug("supported section(type) names are:%s\n", type_names);
Taeung Songc76e4c22019-01-21 22:06:38 +09006427 free(type_names);
6428 }
6429
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07006430 return -ESRCH;
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07006431}
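
/*
 * Usage sketch (hypothetical caller, not part of libbpf): map an ELF section
 * name to program and expected attach types; e.g., "cgroup/connect4" should
 * yield BPF_PROG_TYPE_CGROUP_SOCK_ADDR / BPF_CGROUP_INET4_CONNECT per the
 * section_defs[] table above.
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	err = libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *				       &attach_type);
 */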
Roman Gushchin583c9002017-12-13 15:18:51 +00006432
Martin KaFai Lau590a0082020-01-08 16:35:14 -08006433static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
6434 size_t offset)
6435{
6436 struct bpf_map *map;
6437 size_t i;
6438
6439 for (i = 0; i < obj->nr_maps; i++) {
6440 map = &obj->maps[i];
6441 if (!bpf_map__is_struct_ops(map))
6442 continue;
6443 if (map->sec_offset <= offset &&
6444 offset - map->sec_offset < map->def.value_size)
6445 return map;
6446 }
6447
6448 return NULL;
6449}
6450
6451/* Collect the reloc from ELF and populate the st_ops->progs[] */
6452static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
6453 GElf_Shdr *shdr,
6454 Elf_Data *data)
6455{
6456 const struct btf_member *member;
6457 struct bpf_struct_ops *st_ops;
6458 struct bpf_program *prog;
6459 unsigned int shdr_idx;
6460 const struct btf *btf;
6461 struct bpf_map *map;
6462 Elf_Data *symbols;
6463 unsigned int moff;
6464 const char *name;
Andrii Nakryiko1d1a3bc2020-01-10 10:19:16 -08006465 __u32 member_idx;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08006466 GElf_Sym sym;
6467 GElf_Rel rel;
6468 int i, nrels;
6469
6470 symbols = obj->efile.symbols;
6471 btf = obj->btf;
6472 nrels = shdr->sh_size / shdr->sh_entsize;
6473 for (i = 0; i < nrels; i++) {
6474 if (!gelf_getrel(data, i, &rel)) {
6475 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
6476 return -LIBBPF_ERRNO__FORMAT;
6477 }
6478
6479 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6480 pr_warn("struct_ops reloc: symbol %zx not found\n",
6481 (size_t)GELF_R_SYM(rel.r_info));
6482 return -LIBBPF_ERRNO__FORMAT;
6483 }
6484
6485 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
6486 sym.st_name) ? : "<?>";
6487 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
6488 if (!map) {
6489 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
6490 (size_t)rel.r_offset);
6491 return -EINVAL;
6492 }
6493
6494 moff = rel.r_offset - map->sec_offset;
6495 shdr_idx = sym.st_shndx;
6496 st_ops = map->st_ops;
6497 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
6498 map->name,
6499 (long long)(rel.r_info >> 32),
6500 (long long)sym.st_value,
6501 shdr_idx, (size_t)rel.r_offset,
6502 map->sec_offset, sym.st_name, name);
6503
6504 if (shdr_idx >= SHN_LORESERVE) {
6505 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
6506 map->name, (size_t)rel.r_offset, shdr_idx);
6507 return -LIBBPF_ERRNO__RELOC;
6508 }
6509
6510 member = find_member_by_offset(st_ops->type, moff * 8);
6511 if (!member) {
6512 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
6513 map->name, moff);
6514 return -EINVAL;
6515 }
6516 member_idx = member - btf_members(st_ops->type);
6517 name = btf__name_by_offset(btf, member->name_off);
6518
6519 if (!resolve_func_ptr(btf, member->type, NULL)) {
6520 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
6521 map->name, name);
6522 return -EINVAL;
6523 }
6524
6525 prog = bpf_object__find_prog_by_idx(obj, shdr_idx);
6526 if (!prog) {
6527 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
6528 map->name, shdr_idx, name);
6529 return -EINVAL;
6530 }
6531
6532 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6533 const struct bpf_sec_def *sec_def;
6534
6535 sec_def = find_sec_def(prog->section_name);
6536 if (sec_def &&
6537 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
6538 /* for pr_warn */
6539 prog->type = sec_def->prog_type;
6540 goto invalid_prog;
6541 }
6542
6543 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
6544 prog->attach_btf_id = st_ops->type_id;
6545 prog->expected_attach_type = member_idx;
6546 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
6547 prog->attach_btf_id != st_ops->type_id ||
6548 prog->expected_attach_type != member_idx) {
6549 goto invalid_prog;
6550 }
6551 st_ops->progs[member_idx] = prog;
6552 }
6553
6554 return 0;
6555
6556invalid_prog:
6557 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
6558 map->name, prog->name, prog->section_name, prog->type,
6559 prog->attach_btf_id, prog->expected_attach_type, name);
6560 return -EINVAL;
6561}
6562
KP Singha6ed02c2020-01-17 22:28:25 +01006563#define BTF_TRACE_PREFIX "btf_trace_"
6564#define BTF_MAX_NAME_SIZE 128
6565
6566static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
6567 const char *name, __u32 kind)
6568{
6569 char btf_type_name[BTF_MAX_NAME_SIZE];
6570 int ret;
6571
6572 ret = snprintf(btf_type_name, sizeof(btf_type_name),
6573 "%s%s", prefix, name);
6574 /* snprintf returns the number of characters written excluding the
6575 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
6576 * indicates truncation.
6577 */
6578 if (ret < 0 || ret >= sizeof(btf_type_name))
6579 return -ENAMETOOLONG;
6580 return btf__find_by_name_kind(btf, btf_type_name, kind);
6581}
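
/*
 * Illustration of the lookup above: for a "tp_btf/" program attaching to the
 * sched_switch raw tracepoint, the typedef searched for in vmlinux BTF is
 * "btf_trace_sched_switch", i.e. BTF_TRACE_PREFIX plus the tracepoint name:
 *
 *	id = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, "sched_switch",
 *				     BTF_KIND_TYPEDEF);
 */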
6582
6583static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
6584 enum bpf_attach_type attach_type)
6585{
6586 int err;
6587
6588 if (attach_type == BPF_TRACE_RAW_TP)
6589 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
6590 BTF_KIND_TYPEDEF);
6591 else
6592 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
6593
Eelco Chaudronff26ce52020-02-20 13:26:35 +00006594 if (err <= 0)
6595 pr_warn("%s is not found in vmlinux BTF\n", name);
6596
KP Singha6ed02c2020-01-17 22:28:25 +01006597 return err;
6598}
6599
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006600int libbpf_find_vmlinux_btf_id(const char *name,
6601 enum bpf_attach_type attach_type)
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006602{
KP Singha6ed02c2020-01-17 22:28:25 +01006603 struct btf *btf;
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006604
KP Singha6ed02c2020-01-17 22:28:25 +01006605 btf = libbpf_find_kernel_btf();
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006606 if (IS_ERR(btf)) {
6607 pr_warn("vmlinux BTF is not found\n");
6608 return -EINVAL;
6609 }
6610
KP Singha6ed02c2020-01-17 22:28:25 +01006611 return __find_vmlinux_btf_id(btf, name, attach_type);
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006612}
6613
Alexei Starovoitove7bf94d2019-11-14 10:57:18 -08006614static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
6615{
6616 struct bpf_prog_info_linear *info_linear;
6617 struct bpf_prog_info *info;
6618 struct btf *btf = NULL;
6619 int err = -EINVAL;
6620
6621 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
6622 if (IS_ERR_OR_NULL(info_linear)) {
6623 pr_warn("failed get_prog_info_linear for FD %d\n",
6624 attach_prog_fd);
6625 return -EINVAL;
6626 }
6627 info = &info_linear->info;
6628 if (!info->btf_id) {
6629 pr_warn("The target program doesn't have BTF\n");
6630 goto out;
6631 }
6632 if (btf__get_from_id(info->btf_id, &btf)) {
6633 pr_warn("Failed to get BTF of the program\n");
6634 goto out;
6635 }
6636 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
6637 btf__free(btf);
6638 if (err <= 0) {
6639 pr_warn("%s is not found in prog's BTF\n", name);
6640 goto out;
6641 }
6642out:
6643 free(info_linear);
6644 return err;
6645}
6646
KP Singha6ed02c2020-01-17 22:28:25 +01006647static int libbpf_find_attach_btf_id(struct bpf_program *prog)
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006648{
KP Singha6ed02c2020-01-17 22:28:25 +01006649 enum bpf_attach_type attach_type = prog->expected_attach_type;
6650 __u32 attach_prog_fd = prog->attach_prog_fd;
6651 const char *name = prog->section_name;
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006652 int i, err;
6653
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006654 if (!name)
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006655 return -EINVAL;
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006656
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006657 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
6658 if (!section_defs[i].is_attach_btf)
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006659 continue;
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006660 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006661 continue;
Alexei Starovoitove7bf94d2019-11-14 10:57:18 -08006662 if (attach_prog_fd)
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006663 err = libbpf_find_prog_btf_id(name + section_defs[i].len,
Alexei Starovoitove7bf94d2019-11-14 10:57:18 -08006664 attach_prog_fd);
6665 else
KP Singha6ed02c2020-01-17 22:28:25 +01006666 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
6667 name + section_defs[i].len,
6668 attach_type);
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006669 return err;
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006670 }
6671 pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08006672 return -ESRCH;
Alexei Starovoitov12a86542019-10-30 15:32:12 -07006673}
6674
Andrey Ignatov956b6202018-09-26 15:24:53 -07006675int libbpf_attach_type_by_name(const char *name,
6676 enum bpf_attach_type *attach_type)
6677{
Taeung Songc76e4c22019-01-21 22:06:38 +09006678 char *type_names;
Andrey Ignatov956b6202018-09-26 15:24:53 -07006679 int i;
6680
6681 if (!name)
6682 return -EINVAL;
6683
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006684 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
6685 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
Andrey Ignatov956b6202018-09-26 15:24:53 -07006686 continue;
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006687 if (!section_defs[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07006688 return -EINVAL;
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08006689 *attach_type = section_defs[i].attach_type;
Andrey Ignatov956b6202018-09-26 15:24:53 -07006690 return 0;
6691 }
Andrii Nakryiko4a3d6c62019-12-17 15:42:28 -08006692 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
Taeung Songc76e4c22019-01-21 22:06:38 +09006693 type_names = libbpf_get_type_names(true);
6694 if (type_names != NULL) {
Andrii Nakryiko4a3d6c62019-12-17 15:42:28 -08006695 pr_debug("attachable section(type) names are:%s\n", type_names);
Taeung Songc76e4c22019-01-21 22:06:38 +09006696 free(type_names);
6697 }
6698
Andrey Ignatov956b6202018-09-26 15:24:53 -07006699 return -EINVAL;
6700}
6701
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006702int bpf_map__fd(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00006703{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03006704 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00006705}
6706
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006707const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00006708{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03006709 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00006710}
6711
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006712const char *bpf_map__name(const struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00006713{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03006714 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00006715}
6716
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07006717__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07006718{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07006719 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07006720}
6721
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07006722__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07006723{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07006724 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07006725}
6726
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03006727int bpf_map__set_priv(struct bpf_map *map, void *priv,
6728 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00006729{
6730 if (!map)
6731 return -EINVAL;
6732
6733 if (map->priv) {
6734 if (map->clear_priv)
6735 map->clear_priv(map, map->priv);
6736 }
6737
6738 map->priv = priv;
6739 map->clear_priv = clear_priv;
6740 return 0;
6741}
6742
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006743void *bpf_map__priv(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00006744{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03006745 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00006746}
6747
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006748bool bpf_map__is_offload_neutral(const struct bpf_map *map)
Jakub Kicinskif83fb222018-07-10 14:43:01 -07006749{
6750 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
6751}
6752
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006753bool bpf_map__is_internal(const struct bpf_map *map)
Daniel Borkmannd8599002019-04-09 23:20:13 +02006754{
6755 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
6756}
6757
Jakub Kicinski9aba3612018-06-28 14:41:37 -07006758void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
6759{
6760 map->map_ifindex = ifindex;
6761}
6762
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08006763int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
6764{
6765 if (!bpf_map_type__is_map_in_map(map->def.type)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006766 pr_warn("error: unsupported map type\n");
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08006767 return -EINVAL;
6768 }
6769 if (map->inner_map_fd != -1) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006770 pr_warn("error: inner_map_fd already specified\n");
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08006771 return -EINVAL;
6772 }
6773 map->inner_map_fd = fd;
6774 return 0;
6775}
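
/*
 * Usage sketch (hypothetical caller, not part of libbpf): before load, give a
 * map-in-map its inner map template via the fd of a compatible map.  The
 * inner map parameters and names below are illustrative only.
 *
 *	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
 *				      sizeof(int), 1, 0);
 *	struct bpf_map *outer = bpf_object__find_map_by_name(obj, "outer_map");
 *
 *	if (inner_fd >= 0 && outer)
 *		err = bpf_map__set_inner_map_fd(outer, inner_fd);
 */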
6776
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006777static struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006778__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00006779{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006780 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00006781 struct bpf_map *s, *e;
6782
6783 if (!obj || !obj->maps)
6784 return NULL;
6785
6786 s = obj->maps;
6787 e = obj->maps + obj->nr_maps;
6788
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006789 if ((m < s) || (m >= e)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006790 pr_warn("error in %s: map handler doesn't belong to object\n",
6791 __func__);
Wang Nan9d759a92015-11-27 08:47:35 +00006792 return NULL;
6793 }
6794
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006795 idx = (m - obj->maps) + i;
6796 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00006797 return NULL;
6798 return &obj->maps[idx];
6799}
Wang Nan561bbcc2015-11-27 08:47:36 +00006800
6801struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006802bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006803{
6804 if (prev == NULL)
6805 return obj->maps;
6806
6807 return __bpf_map__iter(prev, obj, 1);
6808}
6809
6810struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006811bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08006812{
6813 if (next == NULL) {
6814 if (!obj->nr_maps)
6815 return NULL;
6816 return obj->maps + obj->nr_maps - 1;
6817 }
6818
6819 return __bpf_map__iter(next, obj, -1);
6820}
6821
6822struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006823bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00006824{
6825 struct bpf_map *pos;
6826
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08006827 bpf_object__for_each_map(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00006828 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00006829 return pos;
6830 }
6831 return NULL;
6832}
Wang Nan5a6acad2016-11-26 07:03:27 +00006833
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01006834int
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07006835bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01006836{
6837 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
6838}
6839
Wang Nan5a6acad2016-11-26 07:03:27 +00006840struct bpf_map *
6841bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
6842{
Andrii Nakryikodb488142019-06-17 12:26:54 -07006843 return ERR_PTR(-ENOTSUP);
Wang Nan5a6acad2016-11-26 07:03:27 +00006844}
Joe Stringere28ff1a2017-01-22 17:11:25 -08006845
6846long libbpf_get_error(const void *ptr)
6847{
Hariprasad Kelamd98363b2019-05-25 14:32:57 +05306848 return PTR_ERR_OR_ZERO(ptr);
Joe Stringere28ff1a2017-01-22 17:11:25 -08006849}
John Fastabend6f6d33f2017-08-15 22:34:22 -07006850
6851int bpf_prog_load(const char *file, enum bpf_prog_type type,
6852 struct bpf_object **pobj, int *prog_fd)
6853{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006854 struct bpf_prog_load_attr attr;
6855
6856 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
6857 attr.file = file;
6858 attr.prog_type = type;
6859 attr.expected_attach_type = 0;
6860
6861 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
6862}
6863
6864int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
6865 struct bpf_object **pobj, int *prog_fd)
6866{
Leo Yan33bae182019-07-02 18:25:31 +08006867 struct bpf_object_open_attr open_attr = {};
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006868 struct bpf_program *prog, *first_prog = NULL;
John Fastabend6f6d33f2017-08-15 22:34:22 -07006869 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07006870 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07006871 int err;
6872
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006873 if (!attr)
6874 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07006875 if (!attr->file)
6876 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07006877
Leo Yan33bae182019-07-02 18:25:31 +08006878 open_attr.file = attr->file;
6879 open_attr.prog_type = attr->prog_type;
6880
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07006881 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07006882 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07006883 return -ENOENT;
6884
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006885 bpf_object__for_each_program(prog, obj) {
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07006886 enum bpf_attach_type attach_type = attr->expected_attach_type;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006887 /*
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07006888 * to preserve backwards compatibility, bpf_prog_load treats
6889 * attr->prog_type, if specified, as an override to whatever
6890 * bpf_object__open guessed
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006891 */
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07006892 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
6893 bpf_program__set_type(prog, attr->prog_type);
6894 bpf_program__set_expected_attach_type(prog,
6895 attach_type);
6896 }
6897 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
6898 /*
6899 * we haven't guessed from section name and user
6900 * didn't provide a fallback type, too bad...
6901 */
6902 bpf_object__close(obj);
6903 return -EINVAL;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006904 }
6905
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07006906 prog->prog_ifindex = attr->ifindex;
Alexei Starovoitovda11b412019-04-01 21:27:47 -07006907 prog->log_level = attr->log_level;
Jiong Wang04656192019-05-24 23:25:19 +01006908 prog->prog_flags = attr->prog_flags;
Taeung Song69495d22018-09-03 08:30:07 +09006909 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006910 first_prog = prog;
6911 }
6912
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08006913 bpf_object__for_each_map(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07006914 if (!bpf_map__is_offload_neutral(map))
6915 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07006916 }
6917
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006918 if (!first_prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08006919 pr_warn("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07006920 bpf_object__close(obj);
6921 return -ENOENT;
6922 }
6923
John Fastabend6f6d33f2017-08-15 22:34:22 -07006924 err = bpf_object__load(obj);
6925 if (err) {
6926 bpf_object__close(obj);
6927 return -EINVAL;
6928 }
6929
6930 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08006931 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07006932 return 0;
6933}
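
/*
 * Usage sketch (hypothetical caller, not part of libbpf): the simple wrapper
 * above opens and loads an object file and returns the fd of its first
 * program.  The file name is illustrative only.
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	err = bpf_prog_load("my_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 */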
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07006934
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07006935struct bpf_link {
Andrii Nakryikod6958702019-12-18 14:50:39 -08006936 int (*detach)(struct bpf_link *link);
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07006937 int (*destroy)(struct bpf_link *link);
Andrii Nakryikoc016b682020-03-02 20:31:58 -08006938 char *pin_path; /* NULL, if not pinned */
6939 int fd; /* hook FD, -1 if not applicable */
Andrii Nakryikod6958702019-12-18 14:50:39 -08006940 bool disconnected;
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07006941};
6942
Andrii Nakryikod6958702019-12-18 14:50:39 -08006943/* Release "ownership" of underlying BPF resource (typically, BPF program
6944 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). A disconnected
6945 * link, when destroyed through a bpf_link__destroy() call, won't attempt to
6946 * detach/unregister that BPF resource. This is useful in situations where,
6947 * say, attached BPF program has to outlive userspace program that attached it
6948 * in the system. Depending on type of BPF program, though, there might be
6949 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
6950 * exit of userspace program doesn't trigger automatic detachment and clean up
6951 * inside the kernel.
6952 */
6953void bpf_link__disconnect(struct bpf_link *link)
6954{
6955 link->disconnected = true;
6956}
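
/*
 * Usage sketch (hypothetical caller, not part of libbpf): let an attachment
 * outlive the loader process, assuming the program's section type supports
 * the generic bpf_program__attach().  As the comment above notes, some
 * program types need extra steps (e.g. pinning) on top of this; after
 * disconnecting, bpf_link__destroy() only frees the link's memory.
 *
 *	link = bpf_program__attach(prog);
 *	if (!libbpf_get_error(link)) {
 *		bpf_link__disconnect(link);
 *		bpf_link__destroy(link);
 *	}
 */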
6957
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07006958int bpf_link__destroy(struct bpf_link *link)
6959{
Andrii Nakryikod6958702019-12-18 14:50:39 -08006960 int err = 0;
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07006961
6962 if (!link)
6963 return 0;
6964
Andrii Nakryikod6958702019-12-18 14:50:39 -08006965 if (!link->disconnected && link->detach)
6966 err = link->detach(link);
6967 if (link->destroy)
6968 link->destroy(link);
Andrii Nakryikoc016b682020-03-02 20:31:58 -08006969 if (link->pin_path)
6970 free(link->pin_path);
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07006971 free(link);
6972
6973 return err;
6974}
6975
Andrii Nakryikoc016b682020-03-02 20:31:58 -08006976int bpf_link__fd(const struct bpf_link *link)
6977{
6978 return link->fd;
6979}
6980
6981const char *bpf_link__pin_path(const struct bpf_link *link)
6982{
6983 return link->pin_path;
6984}
6985
6986static int bpf_link__detach_fd(struct bpf_link *link)
6987{
6988 return close(link->fd);
6989}
6990
6991struct bpf_link *bpf_link__open(const char *path)
6992{
6993 struct bpf_link *link;
6994 int fd;
6995
6996 fd = bpf_obj_get(path);
6997 if (fd < 0) {
6998 fd = -errno;
6999 pr_warn("failed to open link at %s: %d\n", path, fd);
7000 return ERR_PTR(fd);
7001 }
7002
7003 link = calloc(1, sizeof(*link));
7004 if (!link) {
7005 close(fd);
7006 return ERR_PTR(-ENOMEM);
7007 }
7008 link->detach = &bpf_link__detach_fd;
7009 link->fd = fd;
7010
7011 link->pin_path = strdup(path);
7012 if (!link->pin_path) {
7013 bpf_link__destroy(link);
7014 return ERR_PTR(-ENOMEM);
7015 }
7016
7017 return link;
7018}
7019
7020int bpf_link__pin(struct bpf_link *link, const char *path)
7021{
7022 int err;
7023
7024 if (link->pin_path)
7025 return -EBUSY;
7026 err = make_parent_dir(path);
7027 if (err)
7028 return err;
7029 err = check_path(path);
7030 if (err)
7031 return err;
7032
7033 link->pin_path = strdup(path);
7034 if (!link->pin_path)
7035 return -ENOMEM;
7036
7037 if (bpf_obj_pin(link->fd, link->pin_path)) {
7038 err = -errno;
7039 zfree(&link->pin_path);
7040 return err;
7041 }
7042
7043 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
7044 return 0;
7045}
7046
7047int bpf_link__unpin(struct bpf_link *link)
7048{
7049 int err;
7050
7051 if (!link->pin_path)
7052 return -EINVAL;
7053
7054 err = unlink(link->pin_path);
7055 if (err != 0)
7056 return -errno;
7057
7058 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
7059 zfree(&link->pin_path);
7060 return 0;
7061}
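
/* Illustrative sketch (not part of libbpf): re-acquire a link that was pinned
 * earlier with bpf_link__pin() and detach it. The path is hypothetical, and
 * this assumes the kernel supports pinning for this link type:
 *
 *	struct bpf_link *link = bpf_link__open("/sys/fs/bpf/my_link");
 *
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 *	bpf_link__unpin(link);		removes the pin from BPF FS
 *	bpf_link__destroy(link);	detaches, since the link is not disconnected
 */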
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007062
Andrii Nakryikod6958702019-12-18 14:50:39 -08007063static int bpf_link__detach_perf_event(struct bpf_link *link)
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007064{
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007065 int err;
7066
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007067 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007068 if (err)
7069 err = -errno;
7070
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007071 close(link->fd);
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007072 return err;
7073}
7074
7075struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
7076 int pfd)
7077{
7078 char errmsg[STRERR_BUFSIZE];
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007079 struct bpf_link *link;
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007080 int prog_fd, err;
7081
7082 if (pfd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007083 pr_warn("program '%s': invalid perf event FD %d\n",
7084 bpf_program__title(prog, false), pfd);
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007085 return ERR_PTR(-EINVAL);
7086 }
7087 prog_fd = bpf_program__fd(prog);
7088 if (prog_fd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007089 pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
7090 bpf_program__title(prog, false));
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007091 return ERR_PTR(-EINVAL);
7092 }
7093
Andrii Nakryikod6958702019-12-18 14:50:39 -08007094 link = calloc(1, sizeof(*link));
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007095 if (!link)
7096 return ERR_PTR(-ENOMEM);
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007097 link->detach = &bpf_link__detach_perf_event;
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007098 link->fd = pfd;
7099
7100 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
7101 err = -errno;
7102 free(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08007103 pr_warn("program '%s': failed to attach to pfd %d: %s\n",
7104 bpf_program__title(prog, false), pfd,
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007105 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7106 return ERR_PTR(err);
7107 }
7108 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
7109 err = -errno;
7110 free(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08007111 pr_warn("program '%s': failed to enable pfd %d: %s\n",
7112 bpf_program__title(prog, false), pfd,
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007113 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
7114 return ERR_PTR(err);
7115 }
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007116 return link;
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07007117}
7118
Andrii Nakryikob2650022019-07-01 16:58:58 -07007119/*
 7120 * This function is expected to parse an integer in the range of [0, 2^31-1]
 7121 * from the given file using the scanf format string fmt. If the actual parsed
 7122 * value is negative, the result might be indistinguishable from an error.
7123 */
7124static int parse_uint_from_file(const char *file, const char *fmt)
7125{
7126 char buf[STRERR_BUFSIZE];
7127 int err, ret;
7128 FILE *f;
7129
7130 f = fopen(file, "r");
7131 if (!f) {
7132 err = -errno;
7133 pr_debug("failed to open '%s': %s\n", file,
7134 libbpf_strerror_r(err, buf, sizeof(buf)));
7135 return err;
7136 }
7137 err = fscanf(f, fmt, &ret);
7138 if (err != 1) {
7139 err = err == EOF ? -EIO : -errno;
7140 pr_debug("failed to parse '%s': %s\n", file,
7141 libbpf_strerror_r(err, buf, sizeof(buf)));
7142 fclose(f);
7143 return err;
7144 }
7145 fclose(f);
7146 return ret;
7147}
7148
7149static int determine_kprobe_perf_type(void)
7150{
7151 const char *file = "/sys/bus/event_source/devices/kprobe/type";
7152
7153 return parse_uint_from_file(file, "%d\n");
7154}
7155
7156static int determine_uprobe_perf_type(void)
7157{
7158 const char *file = "/sys/bus/event_source/devices/uprobe/type";
7159
7160 return parse_uint_from_file(file, "%d\n");
7161}
7162
7163static int determine_kprobe_retprobe_bit(void)
7164{
7165 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
7166
7167 return parse_uint_from_file(file, "config:%d\n");
7168}
7169
7170static int determine_uprobe_retprobe_bit(void)
7171{
7172 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
7173
7174 return parse_uint_from_file(file, "config:%d\n");
7175}
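
/* For reference, the sysfs files read by the helpers above typically look
 * like this (the dynamic PMU type value varies from system to system):
 *
 *	$ cat /sys/bus/event_source/devices/kprobe/type
 *	6
 *	$ cat /sys/bus/event_source/devices/kprobe/format/retprobe
 *	config:0
 *
 * so a kretprobe is requested by using that PMU type and setting the reported
 * retprobe bit in attr.config, which is what perf_event_open_probe() below
 * does.
 */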
7176
7177static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
7178 uint64_t offset, int pid)
7179{
7180 struct perf_event_attr attr = {};
7181 char errmsg[STRERR_BUFSIZE];
7182 int type, pfd, err;
7183
7184 type = uprobe ? determine_uprobe_perf_type()
7185 : determine_kprobe_perf_type();
7186 if (type < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007187 pr_warn("failed to determine %s perf type: %s\n",
7188 uprobe ? "uprobe" : "kprobe",
7189 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007190 return type;
7191 }
7192 if (retprobe) {
7193 int bit = uprobe ? determine_uprobe_retprobe_bit()
7194 : determine_kprobe_retprobe_bit();
7195
7196 if (bit < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007197 pr_warn("failed to determine %s retprobe bit: %s\n",
7198 uprobe ? "uprobe" : "kprobe",
7199 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007200 return bit;
7201 }
7202 attr.config |= 1 << bit;
7203 }
7204 attr.size = sizeof(attr);
7205 attr.type = type;
Andrii Nakryiko36db2a92019-07-08 21:00:07 -07007206 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
7207 attr.config2 = offset; /* kprobe_addr or probe_offset */
Andrii Nakryikob2650022019-07-01 16:58:58 -07007208
7209 /* pid filter is meaningful only for uprobes */
7210 pfd = syscall(__NR_perf_event_open, &attr,
7211 pid < 0 ? -1 : pid /* pid */,
7212 pid == -1 ? 0 : -1 /* cpu */,
7213 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
7214 if (pfd < 0) {
7215 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007216 pr_warn("%s perf_event_open() failed: %s\n",
7217 uprobe ? "uprobe" : "kprobe",
7218 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007219 return err;
7220 }
7221 return pfd;
7222}
7223
7224struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
7225 bool retprobe,
7226 const char *func_name)
7227{
7228 char errmsg[STRERR_BUFSIZE];
7229 struct bpf_link *link;
7230 int pfd, err;
7231
7232 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
7233 0 /* offset */, -1 /* pid */);
7234 if (pfd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007235 pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
7236 bpf_program__title(prog, false),
7237 retprobe ? "kretprobe" : "kprobe", func_name,
7238 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007239 return ERR_PTR(pfd);
7240 }
7241 link = bpf_program__attach_perf_event(prog, pfd);
7242 if (IS_ERR(link)) {
7243 close(pfd);
7244 err = PTR_ERR(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08007245 pr_warn("program '%s': failed to attach to %s '%s': %s\n",
7246 bpf_program__title(prog, false),
7247 retprobe ? "kretprobe" : "kprobe", func_name,
7248 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007249 return link;
7250 }
7251 return link;
7252}
7253
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08007254static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
7255 struct bpf_program *prog)
7256{
7257 const char *func_name;
7258 bool retprobe;
7259
7260 func_name = bpf_program__title(prog, false) + sec->len;
7261 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
7262
7263 return bpf_program__attach_kprobe(prog, retprobe, func_name);
7264}
7265
Andrii Nakryikob2650022019-07-01 16:58:58 -07007266struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
7267 bool retprobe, pid_t pid,
7268 const char *binary_path,
7269 size_t func_offset)
7270{
7271 char errmsg[STRERR_BUFSIZE];
7272 struct bpf_link *link;
7273 int pfd, err;
7274
7275 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
7276 binary_path, func_offset, pid);
7277 if (pfd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007278 pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
7279 bpf_program__title(prog, false),
7280 retprobe ? "uretprobe" : "uprobe",
7281 binary_path, func_offset,
7282 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007283 return ERR_PTR(pfd);
7284 }
7285 link = bpf_program__attach_perf_event(prog, pfd);
7286 if (IS_ERR(link)) {
7287 close(pfd);
7288 err = PTR_ERR(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08007289 pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
7290 bpf_program__title(prog, false),
7291 retprobe ? "uretprobe" : "uprobe",
7292 binary_path, func_offset,
7293 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07007294 return link;
7295 }
7296 return link;
7297}
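
/* Illustrative sketch (not part of libbpf): attach to a user-space function
 * in all processes. The binary path and offset are hypothetical; libbpf does
 * not resolve symbol names here, so func_offset has to be the function's
 * offset within the binary, e.g. taken from its ELF symbol table:
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1,
 *					  "/usr/bin/bash", 0x1234);
 *
 * where false selects a uprobe (not a uretprobe) and pid == -1 means
 * "all processes".
 */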
7298
Andrii Nakryikof6de59c2019-07-01 16:58:59 -07007299static int determine_tracepoint_id(const char *tp_category,
7300 const char *tp_name)
7301{
7302 char file[PATH_MAX];
7303 int ret;
7304
7305 ret = snprintf(file, sizeof(file),
7306 "/sys/kernel/debug/tracing/events/%s/%s/id",
7307 tp_category, tp_name);
7308 if (ret < 0)
7309 return -errno;
7310 if (ret >= sizeof(file)) {
7311 pr_debug("tracepoint %s/%s path is too long\n",
7312 tp_category, tp_name);
7313 return -E2BIG;
7314 }
7315 return parse_uint_from_file(file, "%d\n");
7316}
7317
7318static int perf_event_open_tracepoint(const char *tp_category,
7319 const char *tp_name)
7320{
7321 struct perf_event_attr attr = {};
7322 char errmsg[STRERR_BUFSIZE];
7323 int tp_id, pfd, err;
7324
7325 tp_id = determine_tracepoint_id(tp_category, tp_name);
7326 if (tp_id < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007327 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
7328 tp_category, tp_name,
7329 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
Andrii Nakryikof6de59c2019-07-01 16:58:59 -07007330 return tp_id;
7331 }
7332
7333 attr.type = PERF_TYPE_TRACEPOINT;
7334 attr.size = sizeof(attr);
7335 attr.config = tp_id;
7336
7337 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
7338 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
7339 if (pfd < 0) {
7340 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007341 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
7342 tp_category, tp_name,
7343 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikof6de59c2019-07-01 16:58:59 -07007344 return err;
7345 }
7346 return pfd;
7347}
7348
7349struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
7350 const char *tp_category,
7351 const char *tp_name)
7352{
7353 char errmsg[STRERR_BUFSIZE];
7354 struct bpf_link *link;
7355 int pfd, err;
7356
7357 pfd = perf_event_open_tracepoint(tp_category, tp_name);
7358 if (pfd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007359 pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
7360 bpf_program__title(prog, false),
7361 tp_category, tp_name,
7362 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
Andrii Nakryikof6de59c2019-07-01 16:58:59 -07007363 return ERR_PTR(pfd);
7364 }
7365 link = bpf_program__attach_perf_event(prog, pfd);
7366 if (IS_ERR(link)) {
7367 close(pfd);
7368 err = PTR_ERR(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08007369 pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
7370 bpf_program__title(prog, false),
7371 tp_category, tp_name,
7372 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikof6de59c2019-07-01 16:58:59 -07007373 return link;
7374 }
7375 return link;
7376}
7377
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08007378static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
7379 struct bpf_program *prog)
7380{
7381 char *sec_name, *tp_cat, *tp_name;
7382 struct bpf_link *link;
7383
7384 sec_name = strdup(bpf_program__title(prog, false));
7385 if (!sec_name)
7386 return ERR_PTR(-ENOMEM);
7387
7388 /* extract "tp/<category>/<name>" */
7389 tp_cat = sec_name + sec->len;
7390 tp_name = strchr(tp_cat, '/');
7391 if (!tp_name) {
7392 link = ERR_PTR(-EINVAL);
7393 goto out;
7394 }
7395 *tp_name = '\0';
7396 tp_name++;
7397
7398 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
7399out:
7400 free(sec_name);
7401 return link;
7402}
7403
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007404struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
7405 const char *tp_name)
7406{
7407 char errmsg[STRERR_BUFSIZE];
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007408 struct bpf_link *link;
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007409 int prog_fd, pfd;
7410
7411 prog_fd = bpf_program__fd(prog);
7412 if (prog_fd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007413 pr_warn("program '%s': can't attach before loaded\n",
7414 bpf_program__title(prog, false));
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007415 return ERR_PTR(-EINVAL);
7416 }
7417
Andrii Nakryikod6958702019-12-18 14:50:39 -08007418 link = calloc(1, sizeof(*link));
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007419 if (!link)
7420 return ERR_PTR(-ENOMEM);
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007421 link->detach = &bpf_link__detach_fd;
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007422
7423 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
7424 if (pfd < 0) {
7425 pfd = -errno;
7426 free(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08007427 pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
7428 bpf_program__title(prog, false), tp_name,
7429 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007430 return ERR_PTR(pfd);
7431 }
7432 link->fd = pfd;
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007433 return link;
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07007434}
7435
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08007436static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
7437 struct bpf_program *prog)
7438{
7439 const char *tp_name = bpf_program__title(prog, false) + sec->len;
7440
7441 return bpf_program__attach_raw_tracepoint(prog, tp_name);
7442}
7443
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08007444struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
7445{
7446 char errmsg[STRERR_BUFSIZE];
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007447 struct bpf_link *link;
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08007448 int prog_fd, pfd;
7449
7450 prog_fd = bpf_program__fd(prog);
7451 if (prog_fd < 0) {
7452 pr_warn("program '%s': can't attach before loaded\n",
7453 bpf_program__title(prog, false));
7454 return ERR_PTR(-EINVAL);
7455 }
7456
Andrii Nakryikod6958702019-12-18 14:50:39 -08007457 link = calloc(1, sizeof(*link));
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08007458 if (!link)
7459 return ERR_PTR(-ENOMEM);
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007460 link->detach = &bpf_link__detach_fd;
Alexei Starovoitovb8c54ea2019-11-14 10:57:06 -08007461
7462 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
7463 if (pfd < 0) {
7464 pfd = -errno;
7465 free(link);
7466 pr_warn("program '%s': failed to attach to trace: %s\n",
7467 bpf_program__title(prog, false),
7468 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
7469 return ERR_PTR(pfd);
7470 }
7471 link->fd = pfd;
 7472 return link;
7473}
7474
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08007475static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
7476 struct bpf_program *prog)
7477{
7478 return bpf_program__attach_trace(prog);
7479}
7480
7481struct bpf_link *bpf_program__attach(struct bpf_program *prog)
7482{
7483 const struct bpf_sec_def *sec_def;
7484
7485 sec_def = find_sec_def(bpf_program__title(prog, false));
7486 if (!sec_def || !sec_def->attach_fn)
7487 return ERR_PTR(-ESRCH);
7488
7489 return sec_def->attach_fn(sec_def, prog);
7490}
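
/* Illustrative sketch (not part of libbpf): for a program whose BPF-side
 * definition uses a recognized section name, e.g.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int handle_execve(struct trace_event_raw_sys_enter *ctx) { ... }
 *
 * user space can attach it generically:
 *
 *	link = bpf_program__attach(prog);
 *
 * bpf_program__attach() looks up the section definition ("tracepoint/") and
 * dispatches to the matching attach_*() helper above (attach_tp() here).
 */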
7491
Martin KaFai Lau590a0082020-01-08 16:35:14 -08007492static int bpf_link__detach_struct_ops(struct bpf_link *link)
7493{
Martin KaFai Lau590a0082020-01-08 16:35:14 -08007494 __u32 zero = 0;
7495
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007496 if (bpf_map_delete_elem(link->fd, &zero))
Martin KaFai Lau590a0082020-01-08 16:35:14 -08007497 return -errno;
7498
7499 return 0;
7500}
7501
7502struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
7503{
7504 struct bpf_struct_ops *st_ops;
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007505 struct bpf_link *link;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08007506 __u32 i, zero = 0;
7507 int err;
7508
7509 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
7510 return ERR_PTR(-EINVAL);
7511
7512 link = calloc(1, sizeof(*link));
7513 if (!link)
 7514 return ERR_PTR(-ENOMEM);
7515
7516 st_ops = map->st_ops;
7517 for (i = 0; i < btf_vlen(st_ops->type); i++) {
7518 struct bpf_program *prog = st_ops->progs[i];
7519 void *kern_data;
7520 int prog_fd;
7521
7522 if (!prog)
7523 continue;
7524
7525 prog_fd = bpf_program__fd(prog);
7526 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
7527 *(unsigned long *)kern_data = prog_fd;
7528 }
7529
7530 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
7531 if (err) {
7532 err = -errno;
7533 free(link);
7534 return ERR_PTR(err);
7535 }
7536
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007537 link->detach = bpf_link__detach_struct_ops;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08007538 link->fd = map->fd;
7539
Andrii Nakryikoc016b682020-03-02 20:31:58 -08007540 return link;
Martin KaFai Lau590a0082020-01-08 16:35:14 -08007541}
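
/* Illustrative sketch (not part of libbpf): for a struct_ops map declared on
 * the BPF side in the ".struct_ops" section (e.g., a TCP congestion control
 * implementation), user space registers it with:
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.my_cc_ops);
 *
 * (the map name is hypothetical) and unregisters it with bpf_link__destroy(),
 * which deletes the single map element via bpf_link__detach_struct_ops().
 */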
7542
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007543enum bpf_perf_event_ret
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007544bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
7545 void **copy_mem, size_t *copy_size,
7546 bpf_perf_event_print_t fn, void *private_data)
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007547{
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007548 struct perf_event_mmap_page *header = mmap_mem;
Daniel Borkmanna64af0e2018-10-19 15:51:03 +02007549 __u64 data_head = ring_buffer_read_head(header);
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007550 __u64 data_tail = header->data_tail;
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007551 void *base = ((__u8 *)header) + page_size;
7552 int ret = LIBBPF_PERF_EVENT_CONT;
7553 struct perf_event_header *ehdr;
7554 size_t ehdr_size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007555
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007556 while (data_head != data_tail) {
7557 ehdr = base + (data_tail & (mmap_size - 1));
7558 ehdr_size = ehdr->size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007559
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007560 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
7561 void *copy_start = ehdr;
7562 size_t len_first = base + mmap_size - copy_start;
7563 size_t len_secnd = ehdr_size - len_first;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007564
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007565 if (*copy_size < ehdr_size) {
7566 free(*copy_mem);
7567 *copy_mem = malloc(ehdr_size);
7568 if (!*copy_mem) {
7569 *copy_size = 0;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007570 ret = LIBBPF_PERF_EVENT_ERROR;
7571 break;
7572 }
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007573 *copy_size = ehdr_size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007574 }
7575
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007576 memcpy(*copy_mem, copy_start, len_first);
7577 memcpy(*copy_mem + len_first, base, len_secnd);
7578 ehdr = *copy_mem;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007579 }
7580
Daniel Borkmann3dca2112018-10-21 02:09:28 +02007581 ret = fn(ehdr, private_data);
7582 data_tail += ehdr_size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007583 if (ret != LIBBPF_PERF_EVENT_CONT)
7584 break;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007585 }
7586
Daniel Borkmanna64af0e2018-10-19 15:51:03 +02007587 ring_buffer_write_tail(header, data_tail);
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07007588 return ret;
7589}
Song Liu34be16462019-03-11 22:30:38 -07007590
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007591struct perf_buffer;
7592
7593struct perf_buffer_params {
7594 struct perf_event_attr *attr;
 7595 /* if event_cb is specified, it takes precedence */
7596 perf_buffer_event_fn event_cb;
7597 /* sample_cb and lost_cb are higher-level common-case callbacks */
7598 perf_buffer_sample_fn sample_cb;
7599 perf_buffer_lost_fn lost_cb;
7600 void *ctx;
7601 int cpu_cnt;
7602 int *cpus;
7603 int *map_keys;
7604};
7605
7606struct perf_cpu_buf {
7607 struct perf_buffer *pb;
7608 void *base; /* mmap()'ed memory */
7609 void *buf; /* for reconstructing segmented data */
7610 size_t buf_size;
7611 int fd;
7612 int cpu;
7613 int map_key;
7614};
7615
7616struct perf_buffer {
7617 perf_buffer_event_fn event_cb;
7618 perf_buffer_sample_fn sample_cb;
7619 perf_buffer_lost_fn lost_cb;
7620 void *ctx; /* passed into callbacks */
7621
7622 size_t page_size;
7623 size_t mmap_size;
7624 struct perf_cpu_buf **cpu_bufs;
7625 struct epoll_event *events;
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007626 int cpu_cnt; /* number of allocated CPU buffers */
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007627 int epoll_fd; /* epoll instance FD used to poll per-CPU perf buffers */
7628 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
7629};
7630
7631static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
7632 struct perf_cpu_buf *cpu_buf)
7633{
7634 if (!cpu_buf)
7635 return;
7636 if (cpu_buf->base &&
7637 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
Kefeng Wangbe180102019-10-21 13:55:32 +08007638 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007639 if (cpu_buf->fd >= 0) {
7640 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
7641 close(cpu_buf->fd);
7642 }
7643 free(cpu_buf->buf);
7644 free(cpu_buf);
7645}
7646
7647void perf_buffer__free(struct perf_buffer *pb)
7648{
7649 int i;
7650
7651 if (!pb)
7652 return;
7653 if (pb->cpu_bufs) {
7654 for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
7655 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
7656
7657 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
7658 perf_buffer__free_cpu_buf(pb, cpu_buf);
7659 }
7660 free(pb->cpu_bufs);
7661 }
7662 if (pb->epoll_fd >= 0)
7663 close(pb->epoll_fd);
7664 free(pb->events);
7665 free(pb);
7666}
7667
7668static struct perf_cpu_buf *
7669perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
7670 int cpu, int map_key)
7671{
7672 struct perf_cpu_buf *cpu_buf;
7673 char msg[STRERR_BUFSIZE];
7674 int err;
7675
7676 cpu_buf = calloc(1, sizeof(*cpu_buf));
7677 if (!cpu_buf)
7678 return ERR_PTR(-ENOMEM);
7679
7680 cpu_buf->pb = pb;
7681 cpu_buf->cpu = cpu;
7682 cpu_buf->map_key = map_key;
7683
7684 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
7685 -1, PERF_FLAG_FD_CLOEXEC);
7686 if (cpu_buf->fd < 0) {
7687 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007688 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
7689 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007690 goto error;
7691 }
7692
7693 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
7694 PROT_READ | PROT_WRITE, MAP_SHARED,
7695 cpu_buf->fd, 0);
7696 if (cpu_buf->base == MAP_FAILED) {
7697 cpu_buf->base = NULL;
7698 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007699 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
7700 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007701 goto error;
7702 }
7703
7704 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
7705 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007706 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
7707 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007708 goto error;
7709 }
7710
7711 return cpu_buf;
7712
7713error:
7714 perf_buffer__free_cpu_buf(pb, cpu_buf);
7715 return (struct perf_cpu_buf *)ERR_PTR(err);
7716}
7717
7718static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
7719 struct perf_buffer_params *p);
7720
7721struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
7722 const struct perf_buffer_opts *opts)
7723{
7724 struct perf_buffer_params p = {};
Arnaldo Carvalho de Melo4be6e052019-07-19 11:34:07 -03007725 struct perf_event_attr attr = { 0, };
7726
7727 attr.config = PERF_COUNT_SW_BPF_OUTPUT,
7728 attr.type = PERF_TYPE_SOFTWARE;
7729 attr.sample_type = PERF_SAMPLE_RAW;
7730 attr.sample_period = 1;
7731 attr.wakeup_events = 1;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007732
7733 p.attr = &attr;
7734 p.sample_cb = opts ? opts->sample_cb : NULL;
7735 p.lost_cb = opts ? opts->lost_cb : NULL;
7736 p.ctx = opts ? opts->ctx : NULL;
7737
7738 return __perf_buffer__new(map_fd, page_cnt, &p);
7739}
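
/* Illustrative sketch (not part of libbpf): a typical consumer of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map filled via bpf_perf_event_output().
 * Names and the page count are hypothetical; page_cnt must be a power of two:
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		... handle one raw sample ...
 *	}
 *
 *	struct perf_buffer_opts opts = { .sample_cb = on_sample };
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(bpf_map__fd(map), 8, &opts);
 *	if (IS_ERR(pb))
 *		return PTR_ERR(pb);
 *	while (!exiting)
 *		perf_buffer__poll(pb, 100);
 *	perf_buffer__free(pb);
 */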
7740
7741struct perf_buffer *
7742perf_buffer__new_raw(int map_fd, size_t page_cnt,
7743 const struct perf_buffer_raw_opts *opts)
7744{
7745 struct perf_buffer_params p = {};
7746
7747 p.attr = opts->attr;
7748 p.event_cb = opts->event_cb;
7749 p.ctx = opts->ctx;
7750 p.cpu_cnt = opts->cpu_cnt;
7751 p.cpus = opts->cpus;
7752 p.map_keys = opts->map_keys;
7753
7754 return __perf_buffer__new(map_fd, page_cnt, &p);
7755}
7756
7757static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
7758 struct perf_buffer_params *p)
7759{
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007760 const char *online_cpus_file = "/sys/devices/system/cpu/online";
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007761 struct bpf_map_info map = {};
7762 char msg[STRERR_BUFSIZE];
7763 struct perf_buffer *pb;
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007764 bool *online = NULL;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007765 __u32 map_info_len;
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007766 int err, i, j, n;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007767
7768 if (page_cnt & (page_cnt - 1)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007769 pr_warn("page count should be power of two, but is %zu\n",
7770 page_cnt);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007771 return ERR_PTR(-EINVAL);
7772 }
7773
7774 map_info_len = sizeof(map);
7775 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
7776 if (err) {
7777 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007778 pr_warn("failed to get map info for map FD %d: %s\n",
7779 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007780 return ERR_PTR(err);
7781 }
7782
7783 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007784 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
7785 map.name);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007786 return ERR_PTR(-EINVAL);
7787 }
7788
7789 pb = calloc(1, sizeof(*pb));
7790 if (!pb)
7791 return ERR_PTR(-ENOMEM);
7792
7793 pb->event_cb = p->event_cb;
7794 pb->sample_cb = p->sample_cb;
7795 pb->lost_cb = p->lost_cb;
7796 pb->ctx = p->ctx;
7797
7798 pb->page_size = getpagesize();
7799 pb->mmap_size = pb->page_size * page_cnt;
7800 pb->map_fd = map_fd;
7801
7802 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
7803 if (pb->epoll_fd < 0) {
7804 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007805 pr_warn("failed to create epoll instance: %s\n",
7806 libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007807 goto error;
7808 }
7809
7810 if (p->cpu_cnt > 0) {
7811 pb->cpu_cnt = p->cpu_cnt;
7812 } else {
7813 pb->cpu_cnt = libbpf_num_possible_cpus();
7814 if (pb->cpu_cnt < 0) {
7815 err = pb->cpu_cnt;
7816 goto error;
7817 }
7818 if (map.max_entries < pb->cpu_cnt)
7819 pb->cpu_cnt = map.max_entries;
7820 }
7821
7822 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
7823 if (!pb->events) {
7824 err = -ENOMEM;
Kefeng Wangbe180102019-10-21 13:55:32 +08007825 pr_warn("failed to allocate events: out of memory\n");
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007826 goto error;
7827 }
7828 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
7829 if (!pb->cpu_bufs) {
7830 err = -ENOMEM;
Kefeng Wangbe180102019-10-21 13:55:32 +08007831 pr_warn("failed to allocate buffers: out of memory\n");
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007832 goto error;
7833 }
7834
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007835 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
7836 if (err) {
7837 pr_warn("failed to get online CPU mask: %d\n", err);
7838 goto error;
7839 }
7840
7841 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007842 struct perf_cpu_buf *cpu_buf;
7843 int cpu, map_key;
7844
7845 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
7846 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
7847
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007848 /* if the user didn't explicitly request particular CPUs to
 7849 * attach to, skip offline/not-present CPUs
 7850 */
7851 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
7852 continue;
7853
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007854 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
7855 if (IS_ERR(cpu_buf)) {
7856 err = PTR_ERR(cpu_buf);
7857 goto error;
7858 }
7859
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007860 pb->cpu_bufs[j] = cpu_buf;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007861
7862 err = bpf_map_update_elem(pb->map_fd, &map_key,
7863 &cpu_buf->fd, 0);
7864 if (err) {
7865 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007866 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
7867 cpu, map_key, cpu_buf->fd,
7868 libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007869 goto error;
7870 }
7871
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007872 pb->events[j].events = EPOLLIN;
7873 pb->events[j].data.ptr = cpu_buf;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007874 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007875 &pb->events[j]) < 0) {
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007876 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08007877 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
7878 cpu, cpu_buf->fd,
7879 libbpf_strerror_r(err, msg, sizeof(msg)));
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007880 goto error;
7881 }
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007882 j++;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007883 }
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007884 pb->cpu_cnt = j;
7885 free(online);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007886
7887 return pb;
7888
7889error:
Andrii Nakryiko783b8f02019-12-11 17:36:09 -08007890 free(online);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007891 if (pb)
7892 perf_buffer__free(pb);
7893 return ERR_PTR(err);
7894}
7895
7896struct perf_sample_raw {
7897 struct perf_event_header header;
7898 uint32_t size;
7899 char data[0];
7900};
7901
7902struct perf_sample_lost {
7903 struct perf_event_header header;
7904 uint64_t id;
7905 uint64_t lost;
7906 uint64_t sample_id;
7907};
7908
7909static enum bpf_perf_event_ret
7910perf_buffer__process_record(struct perf_event_header *e, void *ctx)
7911{
7912 struct perf_cpu_buf *cpu_buf = ctx;
7913 struct perf_buffer *pb = cpu_buf->pb;
7914 void *data = e;
7915
7916 /* user wants full control over parsing perf event */
7917 if (pb->event_cb)
7918 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
7919
7920 switch (e->type) {
7921 case PERF_RECORD_SAMPLE: {
7922 struct perf_sample_raw *s = data;
7923
7924 if (pb->sample_cb)
7925 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
7926 break;
7927 }
7928 case PERF_RECORD_LOST: {
7929 struct perf_sample_lost *s = data;
7930
7931 if (pb->lost_cb)
7932 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
7933 break;
7934 }
7935 default:
Kefeng Wangbe180102019-10-21 13:55:32 +08007936 pr_warn("unknown perf sample type %d\n", e->type);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007937 return LIBBPF_PERF_EVENT_ERROR;
7938 }
7939 return LIBBPF_PERF_EVENT_CONT;
7940}
7941
7942static int perf_buffer__process_records(struct perf_buffer *pb,
7943 struct perf_cpu_buf *cpu_buf)
7944{
7945 enum bpf_perf_event_ret ret;
7946
7947 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
7948 pb->page_size, &cpu_buf->buf,
7949 &cpu_buf->buf_size,
7950 perf_buffer__process_record, cpu_buf);
7951 if (ret != LIBBPF_PERF_EVENT_CONT)
7952 return ret;
7953 return 0;
7954}
7955
7956int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
7957{
7958 int i, cnt, err;
7959
7960 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
7961 for (i = 0; i < cnt; i++) {
7962 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
7963
7964 err = perf_buffer__process_records(pb, cpu_buf);
7965 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08007966 pr_warn("error while processing records: %d\n", err);
Andrii Nakryikofb84b822019-07-06 11:06:24 -07007967 return err;
7968 }
7969 }
7970 return cnt < 0 ? -errno : cnt;
7971}
7972
Song Liu34be16462019-03-11 22:30:38 -07007973struct bpf_prog_info_array_desc {
7974 int array_offset; /* e.g. offset of jited_prog_insns */
7975 int count_offset; /* e.g. offset of jited_prog_len */
7976 int size_offset; /* > 0: offset of rec size,
7977 * < 0: fix size of -size_offset
7978 */
7979};
7980
7981static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
7982 [BPF_PROG_INFO_JITED_INSNS] = {
7983 offsetof(struct bpf_prog_info, jited_prog_insns),
7984 offsetof(struct bpf_prog_info, jited_prog_len),
7985 -1,
7986 },
7987 [BPF_PROG_INFO_XLATED_INSNS] = {
7988 offsetof(struct bpf_prog_info, xlated_prog_insns),
7989 offsetof(struct bpf_prog_info, xlated_prog_len),
7990 -1,
7991 },
7992 [BPF_PROG_INFO_MAP_IDS] = {
7993 offsetof(struct bpf_prog_info, map_ids),
7994 offsetof(struct bpf_prog_info, nr_map_ids),
7995 -(int)sizeof(__u32),
7996 },
7997 [BPF_PROG_INFO_JITED_KSYMS] = {
7998 offsetof(struct bpf_prog_info, jited_ksyms),
7999 offsetof(struct bpf_prog_info, nr_jited_ksyms),
8000 -(int)sizeof(__u64),
8001 },
8002 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
8003 offsetof(struct bpf_prog_info, jited_func_lens),
8004 offsetof(struct bpf_prog_info, nr_jited_func_lens),
8005 -(int)sizeof(__u32),
8006 },
8007 [BPF_PROG_INFO_FUNC_INFO] = {
8008 offsetof(struct bpf_prog_info, func_info),
8009 offsetof(struct bpf_prog_info, nr_func_info),
8010 offsetof(struct bpf_prog_info, func_info_rec_size),
8011 },
8012 [BPF_PROG_INFO_LINE_INFO] = {
8013 offsetof(struct bpf_prog_info, line_info),
8014 offsetof(struct bpf_prog_info, nr_line_info),
8015 offsetof(struct bpf_prog_info, line_info_rec_size),
8016 },
8017 [BPF_PROG_INFO_JITED_LINE_INFO] = {
8018 offsetof(struct bpf_prog_info, jited_line_info),
8019 offsetof(struct bpf_prog_info, nr_jited_line_info),
8020 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
8021 },
8022 [BPF_PROG_INFO_PROG_TAGS] = {
8023 offsetof(struct bpf_prog_info, prog_tags),
8024 offsetof(struct bpf_prog_info, nr_prog_tags),
8025 -(int)sizeof(__u8) * BPF_TAG_SIZE,
8026 },
8027
8028};
8029
Andrii Nakryiko8983b732019-11-20 23:07:42 -08008030static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
8031 int offset)
Song Liu34be16462019-03-11 22:30:38 -07008032{
8033 __u32 *array = (__u32 *)info;
8034
8035 if (offset >= 0)
8036 return array[offset / sizeof(__u32)];
8037 return -(int)offset;
8038}
8039
Andrii Nakryiko8983b732019-11-20 23:07:42 -08008040static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
8041 int offset)
Song Liu34be16462019-03-11 22:30:38 -07008042{
8043 __u64 *array = (__u64 *)info;
8044
8045 if (offset >= 0)
8046 return array[offset / sizeof(__u64)];
8047 return -(int)offset;
8048}
8049
8050static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
8051 __u32 val)
8052{
8053 __u32 *array = (__u32 *)info;
8054
8055 if (offset >= 0)
8056 array[offset / sizeof(__u32)] = val;
8057}
8058
8059static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
8060 __u64 val)
8061{
8062 __u64 *array = (__u64 *)info;
8063
8064 if (offset >= 0)
8065 array[offset / sizeof(__u64)] = val;
8066}
8067
8068struct bpf_prog_info_linear *
8069bpf_program__get_prog_info_linear(int fd, __u64 arrays)
8070{
8071 struct bpf_prog_info_linear *info_linear;
8072 struct bpf_prog_info info = {};
8073 __u32 info_len = sizeof(info);
8074 __u32 data_len = 0;
8075 int i, err;
8076 void *ptr;
8077
8078 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
8079 return ERR_PTR(-EINVAL);
8080
8081 /* step 1: get array dimensions */
8082 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
8083 if (err) {
8084 pr_debug("can't get prog info: %s", strerror(errno));
8085 return ERR_PTR(-EFAULT);
8086 }
8087
8088 /* step 2: calculate total size of all arrays */
8089 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
8090 bool include_array = (arrays & (1UL << i)) > 0;
8091 struct bpf_prog_info_array_desc *desc;
8092 __u32 count, size;
8093
8094 desc = bpf_prog_info_array_desc + i;
8095
8096 /* kernel is too old to support this field */
8097 if (info_len < desc->array_offset + sizeof(__u32) ||
8098 info_len < desc->count_offset + sizeof(__u32) ||
8099 (desc->size_offset > 0 && info_len < desc->size_offset))
8100 include_array = false;
8101
8102 if (!include_array) {
8103 arrays &= ~(1UL << i); /* clear the bit */
8104 continue;
8105 }
8106
8107 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
8108 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
8109
8110 data_len += count * size;
8111 }
8112
8113 /* step 3: allocate continuous memory */
8114 data_len = roundup(data_len, sizeof(__u64));
8115 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
8116 if (!info_linear)
8117 return ERR_PTR(-ENOMEM);
8118
8119 /* step 4: fill data to info_linear->info */
8120 info_linear->arrays = arrays;
8121 memset(&info_linear->info, 0, sizeof(info));
8122 ptr = info_linear->data;
8123
8124 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
8125 struct bpf_prog_info_array_desc *desc;
8126 __u32 count, size;
8127
8128 if ((arrays & (1UL << i)) == 0)
8129 continue;
8130
8131 desc = bpf_prog_info_array_desc + i;
8132 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
8133 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
8134 bpf_prog_info_set_offset_u32(&info_linear->info,
8135 desc->count_offset, count);
8136 bpf_prog_info_set_offset_u32(&info_linear->info,
8137 desc->size_offset, size);
8138 bpf_prog_info_set_offset_u64(&info_linear->info,
8139 desc->array_offset,
8140 ptr_to_u64(ptr));
8141 ptr += count * size;
8142 }
8143
8144 /* step 5: call syscall again to get required arrays */
8145 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
8146 if (err) {
8147 pr_debug("can't get prog info: %s", strerror(errno));
8148 free(info_linear);
8149 return ERR_PTR(-EFAULT);
8150 }
8151
8152 /* step 6: verify the data */
8153 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
8154 struct bpf_prog_info_array_desc *desc;
8155 __u32 v1, v2;
8156
8157 if ((arrays & (1UL << i)) == 0)
8158 continue;
8159
8160 desc = bpf_prog_info_array_desc + i;
8161 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
8162 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
8163 desc->count_offset);
8164 if (v1 != v2)
Kefeng Wangbe180102019-10-21 13:55:32 +08008165 pr_warn("%s: mismatch in element count\n", __func__);
Song Liu34be16462019-03-11 22:30:38 -07008166
8167 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
8168 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
8169 desc->size_offset);
8170 if (v1 != v2)
Kefeng Wangbe180102019-10-21 13:55:32 +08008171 pr_warn("%s: mismatch in rec size\n", __func__);
Song Liu34be16462019-03-11 22:30:38 -07008172 }
8173
8174 /* step 7: update info_len and data_len */
8175 info_linear->info_len = sizeof(struct bpf_prog_info);
8176 info_linear->data_len = data_len;
8177
8178 return info_linear;
8179}
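
/* Illustrative sketch (not part of libbpf): fetch a couple of variable-length
 * arrays of a program's info in one contiguous allocation:
 *
 *	__u64 arrays = (1UL << BPF_PROG_INFO_JITED_KSYMS) |
 *		       (1UL << BPF_PROG_INFO_JITED_FUNC_LENS);
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd, arrays);
 *	if (IS_ERR(info))
 *		return PTR_ERR(info);
 *	... use info->info.nr_jited_ksyms, info->info.jited_ksyms, ... then:
 *	free(info);
 */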
8180
8181void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
8182{
8183 int i;
8184
8185 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
8186 struct bpf_prog_info_array_desc *desc;
8187 __u64 addr, offs;
8188
8189 if ((info_linear->arrays & (1UL << i)) == 0)
8190 continue;
8191
8192 desc = bpf_prog_info_array_desc + i;
8193 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
8194 desc->array_offset);
8195 offs = addr - ptr_to_u64(info_linear->data);
8196 bpf_prog_info_set_offset_u64(&info_linear->info,
8197 desc->array_offset, offs);
8198 }
8199}
8200
8201void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
8202{
8203 int i;
8204
8205 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
8206 struct bpf_prog_info_array_desc *desc;
8207 __u64 addr, offs;
8208
8209 if ((info_linear->arrays & (1UL << i)) == 0)
8210 continue;
8211
8212 desc = bpf_prog_info_array_desc + i;
8213 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
8214 desc->array_offset);
8215 addr = offs + ptr_to_u64(info_linear->data);
8216 bpf_prog_info_set_offset_u64(&info_linear->info,
8217 desc->array_offset, addr);
8218 }
8219}
Hechao Li6446b312019-06-10 17:56:50 -07008220
Eelco Chaudronff26ce52020-02-20 13:26:35 +00008221int bpf_program__set_attach_target(struct bpf_program *prog,
8222 int attach_prog_fd,
8223 const char *attach_func_name)
8224{
8225 int btf_id;
8226
8227 if (!prog || attach_prog_fd < 0 || !attach_func_name)
8228 return -EINVAL;
8229
8230 if (attach_prog_fd)
8231 btf_id = libbpf_find_prog_btf_id(attach_func_name,
8232 attach_prog_fd);
8233 else
8234 btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
8235 attach_func_name,
8236 prog->expected_attach_type);
8237
8238 if (btf_id < 0)
8239 return btf_id;
8240
8241 prog->attach_btf_id = btf_id;
8242 prog->attach_prog_fd = attach_prog_fd;
8243 return 0;
8244}
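
/* Illustrative sketch (not part of libbpf): re-target an fentry/fexit-style
 * program at runtime, before bpf_object__load(), instead of relying solely on
 * the SEC() name. The kernel function name is hypothetical; attach_prog_fd of
 * 0 means "attach to a vmlinux function":
 *
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);
 */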
8245
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08008246int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
8247{
8248 int err = 0, n, len, start, end = -1;
8249 bool *tmp;
8250
8251 *mask = NULL;
8252 *mask_sz = 0;
8253
 8254 /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
8255 while (*s) {
8256 if (*s == ',' || *s == '\n') {
8257 s++;
8258 continue;
8259 }
8260 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
8261 if (n <= 0 || n > 2) {
8262 pr_warn("Failed to get CPU range %s: %d\n", s, n);
8263 err = -EINVAL;
8264 goto cleanup;
8265 } else if (n == 1) {
8266 end = start;
8267 }
8268 if (start < 0 || start > end) {
8269 pr_warn("Invalid CPU range [%d,%d] in %s\n",
8270 start, end, s);
8271 err = -EINVAL;
8272 goto cleanup;
8273 }
8274 tmp = realloc(*mask, end + 1);
8275 if (!tmp) {
8276 err = -ENOMEM;
8277 goto cleanup;
8278 }
8279 *mask = tmp;
8280 memset(tmp + *mask_sz, 0, start - *mask_sz);
8281 memset(tmp + start, 1, end - start + 1);
8282 *mask_sz = end + 1;
8283 s += len;
8284 }
8285 if (!*mask_sz) {
8286 pr_warn("Empty CPU range\n");
8287 return -EINVAL;
8288 }
8289 return 0;
8290cleanup:
8291 free(*mask);
8292 *mask = NULL;
8293 return err;
8294}
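
/* For example, parse_cpu_mask_str("0-3,5\n", &mask, &n) sets n to 6 and mask
 * to {1, 1, 1, 1, 0, 1} (assuming allocations succeed), matching the format
 * used by files like /sys/devices/system/cpu/online.
 */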
8295
8296int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
8297{
8298 int fd, err = 0, len;
8299 char buf[128];
8300
8301 fd = open(fcpu, O_RDONLY);
8302 if (fd < 0) {
8303 err = -errno;
8304 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
8305 return err;
8306 }
8307 len = read(fd, buf, sizeof(buf));
8308 close(fd);
8309 if (len <= 0) {
8310 err = len ? -errno : -EINVAL;
8311 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
8312 return err;
8313 }
8314 if (len >= sizeof(buf)) {
8315 pr_warn("CPU mask is too big in file %s\n", fcpu);
8316 return -E2BIG;
8317 }
8318 buf[len] = '\0';
8319
8320 return parse_cpu_mask_str(buf, mask, mask_sz);
8321}
8322
Hechao Li6446b312019-06-10 17:56:50 -07008323int libbpf_num_possible_cpus(void)
8324{
8325 static const char *fcpu = "/sys/devices/system/cpu/possible";
Hechao Li6446b312019-06-10 17:56:50 -07008326 static int cpus;
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08008327 int err, n, i, tmp_cpus;
8328 bool *mask;
Hechao Li6446b312019-06-10 17:56:50 -07008329
Takshak Chahande56fbc242019-07-31 15:10:55 -07008330 tmp_cpus = READ_ONCE(cpus);
8331 if (tmp_cpus > 0)
8332 return tmp_cpus;
Hechao Li6446b312019-06-10 17:56:50 -07008333
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08008334 err = parse_cpu_mask_file(fcpu, &mask, &n);
8335 if (err)
8336 return err;
Hechao Li6446b312019-06-10 17:56:50 -07008337
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08008338 tmp_cpus = 0;
8339 for (i = 0; i < n; i++) {
8340 if (mask[i])
8341 tmp_cpus++;
Hechao Li6446b312019-06-10 17:56:50 -07008342 }
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08008343 free(mask);
Takshak Chahande56fbc242019-07-31 15:10:55 -07008344
8345 WRITE_ONCE(cpus, tmp_cpus);
8346 return tmp_cpus;
Hechao Li6446b312019-06-10 17:56:50 -07008347}
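
/* Illustrative sketch (not part of libbpf): a common use of
 * libbpf_num_possible_cpus() is sizing the value buffer for per-CPU maps,
 * where a lookup returns one value per possible CPU:
 *
 *	int n = libbpf_num_possible_cpus();
 *	__u64 *vals;
 *
 *	if (n < 0)
 *		return n;
 *	vals = calloc(n, sizeof(*vals));
 *	...
 *	bpf_map_lookup_elem(map_fd, &key, vals);
 */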
Andrii Nakryikod66562f2019-12-13 17:43:36 -08008348
8349int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
8350 const struct bpf_object_open_opts *opts)
8351{
8352 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
8353 .object_name = s->name,
8354 );
8355 struct bpf_object *obj;
8356 int i;
8357
 8358 /* Attempt to preserve opts->object_name, unless overridden by the user
 8359 * explicitly. Overwriting the object name for skeletons is discouraged,
 8360 * as it breaks global data maps, whose names are prefixed with the
 8361 * object name. When the skeleton is generated, bpftool assumes that
 8362 * this name will stay the same.
 8363 */
8364 if (opts) {
8365 memcpy(&skel_opts, opts, sizeof(*opts));
8366 if (!opts->object_name)
8367 skel_opts.object_name = s->name;
8368 }
8369
8370 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
8371 if (IS_ERR(obj)) {
8372 pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
8373 s->name, PTR_ERR(obj));
8374 return PTR_ERR(obj);
8375 }
8376
8377 *s->obj = obj;
8378
8379 for (i = 0; i < s->map_cnt; i++) {
8380 struct bpf_map **map = s->maps[i].map;
8381 const char *name = s->maps[i].name;
8382 void **mmaped = s->maps[i].mmaped;
8383
8384 *map = bpf_object__find_map_by_name(obj, name);
8385 if (!*map) {
8386 pr_warn("failed to find skeleton map '%s'\n", name);
8387 return -ESRCH;
8388 }
8389
Andrii Nakryiko2ad97d42019-12-13 17:47:09 -08008390 /* externs shouldn't be pre-setup from user code */
Andrii Nakryiko81bfdd02019-12-18 16:28:34 -08008391 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
Andrii Nakryikod66562f2019-12-13 17:43:36 -08008392 *mmaped = (*map)->mmaped;
8393 }
8394
8395 for (i = 0; i < s->prog_cnt; i++) {
8396 struct bpf_program **prog = s->progs[i].prog;
8397 const char *name = s->progs[i].name;
8398
8399 *prog = bpf_object__find_program_by_name(obj, name);
8400 if (!*prog) {
8401 pr_warn("failed to find skeleton program '%s'\n", name);
8402 return -ESRCH;
8403 }
8404 }
8405
8406 return 0;
8407}
8408
8409int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
8410{
8411 int i, err;
8412
8413 err = bpf_object__load(*s->obj);
8414 if (err) {
8415 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
8416 return err;
8417 }
8418
8419 for (i = 0; i < s->map_cnt; i++) {
8420 struct bpf_map *map = *s->maps[i].map;
8421 size_t mmap_sz = bpf_map_mmap_sz(map);
8422 int prot, map_fd = bpf_map__fd(map);
8423 void **mmaped = s->maps[i].mmaped;
Andrii Nakryikod66562f2019-12-13 17:43:36 -08008424
8425 if (!mmaped)
8426 continue;
8427
8428 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
8429 *mmaped = NULL;
8430 continue;
8431 }
8432
8433 if (map->def.map_flags & BPF_F_RDONLY_PROG)
8434 prot = PROT_READ;
8435 else
8436 prot = PROT_READ | PROT_WRITE;
8437
 8438 /* Remap the anonymous mmap()-ed "map initialization image" as
 8439 * BPF map-backed mmap()-ed memory, preserving the same memory
 8440 * address. This causes the kernel to change the process' page
 8441 * tables to point to a different piece of kernel memory, but from
 8442 * the userspace point of view the memory address (and its contents,
 8443 * being identical at this point) stays the same. This mapping will
 8444 * be released by bpf_object__close() as part of the normal clean up
 8445 * procedure, so we don't need to worry about it from the skeleton's
 8446 * clean up perspective.
 8447 */
Andrii Nakryiko2ad97d42019-12-13 17:47:09 -08008448 *mmaped = mmap(map->mmaped, mmap_sz, prot,
8449 MAP_SHARED | MAP_FIXED, map_fd, 0);
8450 if (*mmaped == MAP_FAILED) {
Andrii Nakryikod66562f2019-12-13 17:43:36 -08008451 err = -errno;
8452 *mmaped = NULL;
8453 pr_warn("failed to re-mmap() map '%s': %d\n",
8454 bpf_map__name(map), err);
8455 return err;
8456 }
8457 }
8458
8459 return 0;
8460}
8461
8462int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
8463{
8464 int i;
8465
8466 for (i = 0; i < s->prog_cnt; i++) {
8467 struct bpf_program *prog = *s->progs[i].prog;
8468 struct bpf_link **link = s->progs[i].link;
8469 const struct bpf_sec_def *sec_def;
8470 const char *sec_name = bpf_program__title(prog, false);
8471
8472 sec_def = find_sec_def(sec_name);
8473 if (!sec_def || !sec_def->attach_fn)
8474 continue;
8475
8476 *link = sec_def->attach_fn(sec_def, prog);
8477 if (IS_ERR(*link)) {
8478 pr_warn("failed to auto-attach program '%s': %ld\n",
8479 bpf_program__name(prog), PTR_ERR(*link));
8480 return PTR_ERR(*link);
8481 }
8482 }
8483
8484 return 0;
8485}
8486
8487void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
8488{
8489 int i;
8490
8491 for (i = 0; i < s->prog_cnt; i++) {
8492 struct bpf_link **link = s->progs[i].link;
8493
8494 if (!IS_ERR_OR_NULL(*link))
8495 bpf_link__destroy(*link);
8496 *link = NULL;
8497 }
8498}
8499
8500void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
8501{
8502 if (s->progs)
8503 bpf_object__detach_skeleton(s);
8504 if (s->obj)
8505 bpf_object__close(*s->obj);
8506 free(s->maps);
8507 free(s->progs);
8508 free(s);
8509}
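
/* Illustrative sketch (not part of libbpf): the skeleton helpers above are
 * normally driven through wrappers generated by "bpftool gen skeleton".
 * The skeleton name "my_prog" is hypothetical:
 *
 *	struct my_prog *skel;
 *
 *	skel = my_prog__open();			calls bpf_object__open_skeleton()
 *	if (!skel)
 *		return -1;
 *	... adjust skel->rodata / skel->bss here, before loading ...
 *	if (my_prog__load(skel))		bpf_object__load_skeleton()
 *		goto cleanup;
 *	if (my_prog__attach(skel))		bpf_object__attach_skeleton()
 *		goto cleanup;
 *	...
 * cleanup:
 *	my_prog__destroy(skel);			detach + destroy skeleton
 */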