// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vsprintf() in __base_pr() uses a nonliteral format string, which can
 * break compilation if the user enables the corresponding warning.
 * Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

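/*
 * Usage sketch (illustrative, not part of libbpf itself): a caller can
 * redirect libbpf's logging through its own vprintf-style callback and
 * restore the previous one later. The my_print() below is a hypothetical
 * example, not a libbpf symbol:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(old_fn);	// restore the previous printer
 *
 * Passing NULL suppresses all output, since libbpf_print() below returns
 * early when no callback is installed.
 */
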
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

#define STRERR_BUFSIZE	128

#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
};

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
};

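/*
 * Heap copies of the object's .data and .rodata section contents; they
 * are filled in by bpf_object__init_internal_map() below and later used
 * to seed the corresponding internal maps when the object is loaded.
 */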
struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;
	bool relaxed_core_relocs;

	/*
	 * Information used while doing ELF-related work. Only valid
	 * while fd is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * Every loaded bpf_object is linked into this list, which is
	 * hidden from the caller. bpf_object__<func> handlers operate
	 * on all loaded objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

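/*
 * For example (illustrative): a program in section "cgroup/skb" gets
 * pin_name "cgroup_skb", which lets bpf_object__pin_programs() create
 * one flat pin file per program instead of a nested directory tree.
 */
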
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warning("corrupted section '%s', size: %zu\n",
			   section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs is still
		 * valid, so no special treatment is needed in
		 * bpf_object__close().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

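/*
 * For example (illustrative): on a kernel whose uname release string is
 * "5.3.7-something", get_kernel_version() above returns
 * KERNEL_VERSION(5, 3, 7), i.e. (5 << 16) + (3 << 8) + 7. A release
 * string that does not match "%u.%u.%u" yields 0.
 */
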
static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. Otherwise we would have to duplicate
	 * the buffer to keep the user from freeing it before ELF
	 * processing is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warning("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warning("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warning("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

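/*
 * The "version" section is conventionally emitted from BPF C code as
 * (illustrative, following the convention used by kernel samples):
 *
 *	__u32 _version SEC("version") = LINUX_VERSION_CODE;
 *
 * i.e. a single __u32 holding a KERNEL_VERSION()-encoded value, which
 * bpf_object__init_kversion() above copies into obj->kern_version.
 */
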
static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warning("failed to get sym name string for var %s\n",
				   name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warning("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so that on failure we won't close an
	 * unrelated fd (fd 0 is stdin); zclose() won't close negative
	 * fds.
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, Elf_Data *data, void **data_buff)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("failed to alloc map name\n");
		return -ENOMEM;
	}
	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warning("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (!obj->caps.global_data)
		return 0;
	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data,
						    &obj->sections.data);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata,
						    &obj->sections.rodata);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    obj->efile.bss, NULL);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	/* guard against zero or unevenly sized map definitions */
	if (!data->d_size || !nr_maps || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warning("failed to get map #%d name sym string for obj %s\n",
				   i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits
		 * in bpf_map_def, copy it. Any extra fields in our
		 * version of bpf_map_def will default to zero thanks to
		 * the zero-fill in bpf_object__add_map().
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

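/*
 * A legacy map definition parsed by bpf_object__init_user_maps() above
 * looks like this in BPF C code (illustrative sketch; "my_map" is just
 * an example name):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type = BPF_MAP_TYPE_HASH,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *
 * Each such variable becomes one symbol in the "maps" section, with
 * st_value giving the map's byte offset within that section.
 */
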
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}

/*
 * Fetch an integer attribute of a BTF map definition. Such attributes
 * are represented using a pointer to an array, in which the
 * dimensionality of the array encodes the specified integer value.
 * E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; encodes the
 * `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using the BTF
 * type definition, while using only sizeof(void *) space in the ELF
 * data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
			   map_name, name, btf_kind(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warning("map '%s': attr '%s': type [%u] not found.\n",
			   map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
			   map_name, name, btf_kind(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}

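/*
 * For example (illustrative): with a helper macro along the lines of
 *
 *	#define __uint(name, val) int (*name)[val]
 *
 * a BTF-defined map member written as __uint(type, BPF_MAP_TYPE_ARRAY)
 * becomes a pointer to an array whose nelems == BPF_MAP_TYPE_ARRAY,
 * which get_map_field_int() above recovers into *res.
 */
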
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict)
{
	const struct btf_type *var, *def, *t;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const struct btf_member *m;
	const char *map_name;
	struct bpf_map *map;
	int vlen, i;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	map_name = btf__name_by_offset(obj->btf, var->name_off);
	vlen = btf_vlen(var);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warning("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warning("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warning("map '%s': unexpected var kind %u.\n",
			   map_name, btf_kind(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warning("map '%s': unsupported var linkage %u.\n",
			   map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warning("map '%s': unexpected def kind %u.\n",
			   map_name, btf_kind(def));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warning("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warning("map '%s': invalid field #%d.\n",
				   map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map_name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map_name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map_name, map->def.map_flags);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map_name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warning("map '%s': conflicting key size %u != %u.\n",
					   map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warning("map '%s': key type [%d] not found.\n",
					   map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warning("map '%s': key spec is not PTR: %u.\n",
					   map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
					   map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warning("map '%s': conflicting key size %u != %lld.\n",
					   map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map_name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warning("map '%s': conflicting value size %u != %u.\n",
					   map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warning("map '%s': value type [%d] not found.\n",
					   map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warning("map '%s': value spec is not PTR: %u.\n",
					   map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
					   map_name, t->type, sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %lld.\n",
				 map_name, t->type, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warning("map '%s': conflicting value size %u != %lld.\n",
					   map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else {
			if (strict) {
				pr_warning("map '%s': unknown field '%s'.\n",
					   map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map_name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warning("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}

static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d (%s)\n",
			   obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			break;
		}
	}

	if (!sec) {
		pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict);
		if (err)
			return err;
	}

	return 0;
}

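/*
 * A BTF-defined map handled by bpf_object__init_user_btf_maps() above
 * looks like this in BPF C code (illustrative sketch, using
 * __uint()/__type() style helper macros):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_map SEC(".maps");
 *
 * The compiler emits one BTF VAR per map inside the ".maps" DATASEC,
 * and each VAR is parsed by bpf_object__init_user_btf_map().
 */
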
Andrii Nakryiko291ee022019-10-15 11:28:46 -07001341static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps)
Andrii Nakryikobf829272019-06-17 12:26:53 -07001342{
Andrii Nakryiko291ee022019-10-15 11:28:46 -07001343 bool strict = !relaxed_maps;
Andrii Nakryikobf829272019-06-17 12:26:53 -07001344 int err;
Eric Leblond4708bbd2016-11-15 04:05:47 +00001345
Andrii Nakryikobf829272019-06-17 12:26:53 -07001346 err = bpf_object__init_user_maps(obj, strict);
1347 if (err)
1348 return err;
1349
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001350 err = bpf_object__init_user_btf_maps(obj, strict);
1351 if (err)
1352 return err;
1353
Andrii Nakryikobf829272019-06-17 12:26:53 -07001354 err = bpf_object__init_global_data_maps(obj);
1355 if (err)
1356 return err;
1357
1358 if (obj->nr_maps) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02001359 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
1360 compare_bpf_map);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001361 }
1362 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +00001363}
1364
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01001365static bool section_have_execinstr(struct bpf_object *obj, int idx)
1366{
1367 Elf_Scn *scn;
1368 GElf_Shdr sh;
1369
1370 scn = elf_getscn(obj->efile.elf, idx);
1371 if (!scn)
1372 return false;
1373
1374 if (gelf_getshdr(scn, &sh) != &sh)
1375 return false;
1376
1377 if (sh.sh_flags & SHF_EXECINSTR)
1378 return true;
1379
1380 return false;
1381}
1382
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001383static void bpf_object__sanitize_btf(struct bpf_object *obj)
1384{
1385 bool has_datasec = obj->caps.btf_datasec;
1386 bool has_func = obj->caps.btf_func;
1387 struct btf *btf = obj->btf;
1388 struct btf_type *t;
1389 int i, j, vlen;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001390
1391 if (!obj->btf || (has_func && has_datasec))
1392 return;
1393
1394 for (i = 1; i <= btf__get_nr_types(btf); i++) {
1395 t = (struct btf_type *)btf__type_by_id(btf, i);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001396
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001397 if (!has_datasec && btf_is_var(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001398 /* replace VAR with INT */
1399 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
Andrii Nakryiko1d4126c2019-07-19 12:46:03 -07001400 /*
1401 * using size = 1 is the safest choice, 4 will be too
1402 * big and cause kernel BTF validation failure if
1403 * original variable took less than 4 bytes
1404 */
1405 t->size = 1;
Jakub Kicinski708852d2019-08-13 16:24:57 -07001406 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001407 } else if (!has_datasec && btf_is_datasec(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001408 /* replace DATASEC with STRUCT */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001409 const struct btf_var_secinfo *v = btf_var_secinfos(t);
1410 struct btf_member *m = btf_members(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001411 struct btf_type *vt;
1412 char *name;
1413
1414 name = (char *)btf__name_by_offset(btf, t->name_off);
1415 while (*name) {
1416 if (*name == '.')
1417 *name = '_';
1418 name++;
1419 }
1420
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001421 vlen = btf_vlen(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001422 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
1423 for (j = 0; j < vlen; j++, v++, m++) {
1424 /* order of field assignments is important */
1425 m->offset = v->offset * 8;
1426 m->type = v->type;
1427 /* preserve variable name as member name */
1428 vt = (void *)btf__type_by_id(btf, v->type);
1429 m->name_off = vt->name_off;
1430 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001431 } else if (!has_func && btf_is_func_proto(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001432 /* replace FUNC_PROTO with ENUM */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001433 vlen = btf_vlen(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001434 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
1435 t->size = sizeof(__u32); /* kernel enforced */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001436 } else if (!has_func && btf_is_func(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001437 /* replace FUNC with TYPEDEF */
1438 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
1439 }
1440 }
1441}
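
/*
 * Illustration (a sketch, not literal BTF): on kernels without
 * BTF_KIND_DATASEC support, the sanitizer above degrades BTF in place:
 *
 *	DATASEC '.data' { VAR 'cnt' (int) }
 * becomes
 *	STRUCT '_data' { MEMBER 'cnt' at bit offset 0 }
 *
 * with each remaining VAR rewritten into a 1-byte INT, so that older
 * kernel BTF validators still accept the object.
 */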
1442
1443static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
1444{
1445 if (!obj->btf_ext)
1446 return;
1447
1448 if (!obj->caps.btf_func) {
1449 btf_ext__free(obj->btf_ext);
1450 obj->btf_ext = NULL;
1451 }
1452}
1453
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001454static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
1455{
1456 return obj->efile.btf_maps_shndx >= 0;
1457}
1458
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001459static int bpf_object__init_btf(struct bpf_object *obj,
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001460 Elf_Data *btf_data,
1461 Elf_Data *btf_ext_data)
1462{
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001463 bool btf_required = bpf_object__is_btf_mandatory(obj);
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001464 int err = 0;
1465
1466 if (btf_data) {
1467 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
1468 if (IS_ERR(obj->btf)) {
1469 pr_warning("Error loading ELF section %s: %d.\n",
1470 BTF_ELF_SEC, err);
1471 goto out;
1472 }
1473 err = btf__finalize_data(obj, obj->btf);
1474 if (err) {
1475 pr_warning("Error finalizing %s: %d.\n",
1476 BTF_ELF_SEC, err);
1477 goto out;
1478 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001479 }
1480 if (btf_ext_data) {
1481 if (!obj->btf) {
1482 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
1483 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
1484 goto out;
1485 }
1486 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
1487 btf_ext_data->d_size);
1488 if (IS_ERR(obj->btf_ext)) {
1489 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
1490 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
1491 obj->btf_ext = NULL;
1492 goto out;
1493 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001494 }
1495out:
1496 if (err || IS_ERR(obj->btf)) {
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001497 if (btf_required)
1498 err = err ? : PTR_ERR(obj->btf);
1499 else
1500 err = 0;
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001501 if (!IS_ERR_OR_NULL(obj->btf))
1502 btf__free(obj->btf);
1503 obj->btf = NULL;
1504 }
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001505 if (btf_required && !obj->btf) {
1506 pr_warning("BTF is required, but is missing or corrupted.\n");
1507 return err == 0 ? -ENOENT : err;
1508 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001509 return 0;
1510}
1511
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001512static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
1513{
1514 int err = 0;
1515
1516 if (!obj->btf)
1517 return 0;
1518
1519 bpf_object__sanitize_btf(obj);
1520 bpf_object__sanitize_btf_ext(obj);
1521
1522 err = btf__load(obj->btf);
1523 if (err) {
1524 pr_warning("Error loading %s into kernel: %d.\n",
1525 BTF_ELF_SEC, err);
1526 btf__free(obj->btf);
1527 obj->btf = NULL;
Andrii Nakryiko04efe592019-07-19 12:32:42 -07001528 /* btf_ext can't exist without btf, so free it as well */
1529 if (obj->btf_ext) {
1530 btf_ext__free(obj->btf_ext);
1531 obj->btf_ext = NULL;
1532 }
1533
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001534 if (bpf_object__is_btf_mandatory(obj))
1535 return err;
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001536 }
1537 return 0;
1538}
1539
Andrii Nakryiko291ee022019-10-15 11:28:46 -07001540static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps)
Wang Nan29603662015-07-01 02:13:56 +00001541{
1542 Elf *elf = obj->efile.elf;
1543 GElf_Ehdr *ep = &obj->efile.ehdr;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001544 Elf_Data *btf_ext_data = NULL;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001545 Elf_Data *btf_data = NULL;
Wang Nan29603662015-07-01 02:13:56 +00001546 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +00001547 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +00001548
1549 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
1550 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
Andrii Nakryiko399dc652019-05-29 10:36:11 -07001551 pr_warning("failed to get e_shstrndx from %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001552 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001553 }
1554
1555 while ((scn = elf_nextscn(elf, scn)) != NULL) {
1556 char *name;
1557 GElf_Shdr sh;
1558 Elf_Data *data;
1559
1560 idx++;
1561 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001562 pr_warning("failed to get section(%d) header from %s\n",
1563 idx, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001564 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001565 }
1566
1567 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1568 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001569 pr_warning("failed to get section(%d) name from %s\n",
1570 idx, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001571 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001572 }
1573
1574 data = elf_getdata(scn, 0);
1575 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001576 pr_warning("failed to get section(%d) data from %s(%s)\n",
1577 idx, name, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001578 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001579 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001580 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
1581 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +00001582 (int)sh.sh_link, (unsigned long)sh.sh_flags,
1583 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +00001584
Daniel Borkmann1713d682019-04-09 23:20:14 +02001585 if (strcmp(name, "license") == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001586 err = bpf_object__init_license(obj,
1587 data->d_buf,
1588 data->d_size);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001589 if (err)
1590 return err;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001591 } else if (strcmp(name, "version") == 0) {
John Fastabend54b86252019-10-18 07:41:26 -07001592 err = bpf_object__init_kversion(obj,
1593 data->d_buf,
1594 data->d_size);
1595 if (err)
1596 return err;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001597 } else if (strcmp(name, "maps") == 0) {
Wang Nan666810e2016-01-25 09:55:49 +00001598 obj->efile.maps_shndx = idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001599 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
1600 obj->efile.btf_maps_shndx = idx;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001601 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
1602 btf_data = data;
Yonghong Song2993e052018-11-19 15:29:16 -08001603 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001604 btf_ext_data = data;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001605 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +00001606 if (obj->efile.symbols) {
1607 pr_warning("bpf: multiple SYMTAB in %s\n",
1608 obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001609 return -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00001610 }
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001611 obj->efile.symbols = data;
1612 obj->efile.strtabidx = sh.sh_link;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001613 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
1614 if (sh.sh_flags & SHF_EXECINSTR) {
1615 if (strcmp(name, ".text") == 0)
1616 obj->efile.text_shndx = idx;
1617 err = bpf_object__add_program(obj, data->d_buf,
1618 data->d_size, name, idx);
1619 if (err) {
1620 char errmsg[STRERR_BUFSIZE];
1621 char *cp = libbpf_strerror_r(-err, errmsg,
1622 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +00001623
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001624 pr_warning("failed to alloc program %s (%s): %s\n",
1625 name, obj->path, cp);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001626 return err;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001627 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001628 } else if (strcmp(name, ".data") == 0) {
1629 obj->efile.data = data;
1630 obj->efile.data_shndx = idx;
1631 } else if (strcmp(name, ".rodata") == 0) {
1632 obj->efile.rodata = data;
1633 obj->efile.rodata_shndx = idx;
1634 } else {
1635 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nana5b8bd42015-07-01 02:14:00 +00001636 }
Wang Nanb62f06e2015-07-01 02:14:01 +00001637 } else if (sh.sh_type == SHT_REL) {
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001638 int nr_reloc = obj->efile.nr_reloc;
Wang Nanb62f06e2015-07-01 02:14:01 +00001639 void *reloc = obj->efile.reloc;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01001640 int sec = sh.sh_info; /* points to other section */
1641
1642 /* Only do relo for section with exec instructions */
1643 if (!section_have_execinstr(obj, sec)) {
1644 pr_debug("skip relo %s(%d) for section(%d)\n",
1645 name, idx, sec);
1646 continue;
1647 }
Wang Nanb62f06e2015-07-01 02:14:01 +00001648
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001649 reloc = reallocarray(reloc, nr_reloc + 1,
Jakub Kicinski531b0142018-07-10 14:43:05 -07001650 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +00001651 if (!reloc) {
1652 pr_warning("realloc failed\n");
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001653 return -ENOMEM;
Wang Nanb62f06e2015-07-01 02:14:01 +00001654 }
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001655
1656 obj->efile.reloc = reloc;
1657 obj->efile.nr_reloc++;
1658
1659 obj->efile.reloc[nr_reloc].shdr = sh;
1660 obj->efile.reloc[nr_reloc].data = data;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001661 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
1662 obj->efile.bss = data;
1663 obj->efile.bss_shndx = idx;
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001664 } else {
1665 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +00001666 }
Wang Nan29603662015-07-01 02:13:56 +00001667 }
Wang Nan561bbcc2015-11-27 08:47:36 +00001668
Wang Nan77ba9a52015-12-08 02:25:30 +00001669 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
1670 pr_warning("Corrupted ELF file: index of strtab invalid\n");
Andrii Nakryikof1021542019-05-29 10:36:07 -07001671 return -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00001672 }
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001673 err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001674 if (!err)
Andrii Nakryiko291ee022019-10-15 11:28:46 -07001675 err = bpf_object__init_maps(obj, relaxed_maps);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001676 if (!err)
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001677 err = bpf_object__sanitize_and_load_btf(obj);
1678 if (!err)
Andrii Nakryikobf829272019-06-17 12:26:53 -07001679 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +00001680 return err;
1681}
1682
Wang Nan34090912015-07-01 02:14:02 +00001683static struct bpf_program *
1684bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
1685{
1686 struct bpf_program *prog;
1687 size_t i;
1688
1689 for (i = 0; i < obj->nr_programs; i++) {
1690 prog = &obj->programs[i];
1691 if (prog->idx == idx)
1692 return prog;
1693 }
1694 return NULL;
1695}
1696
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07001697struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07001698bpf_object__find_program_by_title(const struct bpf_object *obj,
1699 const char *title)
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07001700{
1701 struct bpf_program *pos;
1702
1703 bpf_object__for_each_program(pos, obj) {
1704 if (pos->section_name && !strcmp(pos->section_name, title))
1705 return pos;
1706 }
1707 return NULL;
1708}
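
/*
 * Usage sketch (hypothetical, not part of libbpf): looking up a program
 * by its ELF section title after opening an object; the "xdp_prog.o"
 * path and "xdp" title are assumptions for illustration.
 *
 *	struct bpf_object *obj = bpf_object__open("xdp_prog.o");
 *	struct bpf_program *prog = NULL;
 *
 *	if (!IS_ERR_OR_NULL(obj))
 *		prog = bpf_object__find_program_by_title(obj, "xdp");
 *	if (!prog)
 *		... no program lives in a section titled "xdp" ...
 */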
1709
Daniel Borkmannd8599002019-04-09 23:20:13 +02001710static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
1711 int shndx)
1712{
1713 return shndx == obj->efile.data_shndx ||
1714 shndx == obj->efile.bss_shndx ||
1715 shndx == obj->efile.rodata_shndx;
1716}
1717
1718static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
1719 int shndx)
1720{
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001721 return shndx == obj->efile.maps_shndx ||
1722 shndx == obj->efile.btf_maps_shndx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001723}
1724
1725static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
1726 int shndx)
1727{
1728 return shndx == obj->efile.text_shndx ||
1729 bpf_object__shndx_is_maps(obj, shndx) ||
1730 bpf_object__shndx_is_data(obj, shndx);
1731}
1732
1733static enum libbpf_map_type
1734bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
1735{
1736 if (shndx == obj->efile.data_shndx)
1737 return LIBBPF_MAP_DATA;
1738 else if (shndx == obj->efile.bss_shndx)
1739 return LIBBPF_MAP_BSS;
1740 else if (shndx == obj->efile.rodata_shndx)
1741 return LIBBPF_MAP_RODATA;
1742 else
1743 return LIBBPF_MAP_UNSPEC;
1744}
1745
Wang Nan34090912015-07-01 02:14:02 +00001746static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001747bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1748 Elf_Data *data, struct bpf_object *obj)
Wang Nan34090912015-07-01 02:14:02 +00001749{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001750 Elf_Data *symbols = obj->efile.symbols;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001751 struct bpf_map *maps = obj->maps;
1752 size_t nr_maps = obj->nr_maps;
Wang Nan34090912015-07-01 02:14:02 +00001753 int i, nrels;
1754
Andrii Nakryiko399dc652019-05-29 10:36:11 -07001755 pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
Wang Nan34090912015-07-01 02:14:02 +00001756 nrels = shdr->sh_size / shdr->sh_entsize;
1757
1758 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
1759 if (!prog->reloc_desc) {
1760 pr_warning("failed to alloc memory in relocation\n");
1761 return -ENOMEM;
1762 }
1763 prog->nr_reloc = nrels;
1764
1765 for (i = 0; i < nrels; i++) {
Wang Nan34090912015-07-01 02:14:02 +00001766 struct bpf_insn *insns = prog->insns;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001767 enum libbpf_map_type type;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001768 unsigned int insn_idx;
1769 unsigned int shdr_idx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001770 const char *name;
Wang Nan34090912015-07-01 02:14:02 +00001771 size_t map_idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001772 GElf_Sym sym;
1773 GElf_Rel rel;
Wang Nan34090912015-07-01 02:14:02 +00001774
1775 if (!gelf_getrel(data, i, &rel)) {
1776 pr_warning("relocation: failed to get %d reloc\n", i);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001777 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00001778 }
1779
Andrii Nakryiko399dc652019-05-29 10:36:11 -07001780 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
Wang Nan34090912015-07-01 02:14:02 +00001781 pr_warning("relocation: symbol %"PRIx64" not found\n",
1782 GELF_R_SYM(rel.r_info));
Wang Nan6371ca3b2015-11-06 13:49:37 +00001783 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00001784 }
1785
Daniel Borkmannd8599002019-04-09 23:20:13 +02001786 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1787 sym.st_name) ? : "<?>";
1788
1789 pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
1790 (long long) (rel.r_info >> 32),
1791 (long long) sym.st_value, sym.st_name, name);
1792
1793 shdr_idx = sym.st_shndx;
Andrii Nakryikof2a3e4e2019-07-23 14:11:33 -07001794 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1795 pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
1796 insn_idx, shdr_idx);
1797
1798 if (shdr_idx >= SHN_LORESERVE) {
1799 pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
1800 name, shdr_idx, insn_idx,
1801 insns[insn_idx].code);
1802 return -LIBBPF_ERRNO__RELOC;
1803 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001804 if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
1805 pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
1806 prog->section_name, shdr_idx);
Wang Nan666810e2016-01-25 09:55:49 +00001807 return -LIBBPF_ERRNO__RELOC;
1808 }
1809
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001810 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1811 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1812 pr_warning("incorrect bpf_call opcode\n");
1813 return -LIBBPF_ERRNO__RELOC;
1814 }
1815 prog->reloc_desc[i].type = RELO_CALL;
1816 prog->reloc_desc[i].insn_idx = insn_idx;
1817 prog->reloc_desc[i].text_off = sym.st_value;
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001818 obj->has_pseudo_calls = true;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001819 continue;
1820 }
1821
Wang Nan34090912015-07-01 02:14:02 +00001822 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1823 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1824 insn_idx, insns[insn_idx].code);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001825 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001826 }
1827
Daniel Borkmannd8599002019-04-09 23:20:13 +02001828 if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
1829 bpf_object__shndx_is_data(obj, shdr_idx)) {
1830 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02001831 if (type != LIBBPF_MAP_UNSPEC) {
1832 if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
1833 pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
1834 name, insn_idx, insns[insn_idx].code);
1835 return -LIBBPF_ERRNO__RELOC;
1836 }
1837 if (!obj->caps.global_data) {
1838 pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
1839 name, insn_idx);
1840 return -LIBBPF_ERRNO__RELOC;
1841 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001842 }
1843
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001844 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02001845 if (maps[map_idx].libbpf_type != type)
1846 continue;
1847 if (type != LIBBPF_MAP_UNSPEC ||
Andrii Nakryikodb488142019-06-17 12:26:54 -07001848 (maps[map_idx].sec_idx == sym.st_shndx &&
1849 maps[map_idx].sec_offset == sym.st_value)) {
1850 pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
1851 map_idx, maps[map_idx].name,
1852 maps[map_idx].sec_idx,
1853 maps[map_idx].sec_offset,
1854 insn_idx);
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001855 break;
1856 }
Joe Stringer94e5ade2017-01-22 17:11:22 -08001857 }
Joe Stringer94e5ade2017-01-22 17:11:22 -08001858
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001859 if (map_idx >= nr_maps) {
Andrii Nakryiko76e10222019-05-29 10:36:10 -07001860 pr_warning("bpf relocation: map_idx %d larger than %d\n",
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001861 (int)map_idx, (int)nr_maps - 1);
1862 return -LIBBPF_ERRNO__RELOC;
1863 }
Wang Nan34090912015-07-01 02:14:02 +00001864
Daniel Borkmannd8599002019-04-09 23:20:13 +02001865 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
1866 RELO_DATA : RELO_LD64;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001867 prog->reloc_desc[i].insn_idx = insn_idx;
1868 prog->reloc_desc[i].map_idx = map_idx;
1869 }
Wang Nan34090912015-07-01 02:14:02 +00001870 }
1871 return 0;
1872}
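
/*
 * Illustration (not literal output): a map access in BPF C code such as
 *
 *	bpf_map_lookup_elem(&my_hash, &key);
 *
 * compiles to a BPF_LD | BPF_IMM | BPF_DW (ld_imm64) instruction carrying
 * a relocation against the map's symbol. The collector above records it
 * as RELO_LD64 (or RELO_DATA for .data/.rodata/.bss references), so the
 * later relocation pass can patch the created map's fd into the
 * instruction's immediate. "my_hash" is a hypothetical map name.
 */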
1873
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001874static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001875{
1876 struct bpf_map_def *def = &map->def;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001877 __u32 key_type_id = 0, value_type_id = 0;
Yonghong Song96408c42019-02-04 11:00:58 -08001878 int ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001879
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001880 /* if it's BTF-defined map, we don't need to search for type IDs */
1881 if (map->sec_idx == obj->efile.btf_maps_shndx)
1882 return 0;
1883
Daniel Borkmannd8599002019-04-09 23:20:13 +02001884 if (!bpf_map__is_internal(map)) {
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001885 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
Daniel Borkmannd8599002019-04-09 23:20:13 +02001886 def->value_size, &key_type_id,
1887 &value_type_id);
1888 } else {
1889 /*
1890 * LLVM annotates global data differently in BTF, that is,
1891 * only as '.data', '.bss' or '.rodata'.
1892 */
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001893 ret = btf__find_by_name(obj->btf,
Daniel Borkmannd8599002019-04-09 23:20:13 +02001894 libbpf_type_to_btf_name[map->libbpf_type]);
1895 }
1896 if (ret < 0)
Yonghong Song96408c42019-02-04 11:00:58 -08001897 return ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001898
Yonghong Song96408c42019-02-04 11:00:58 -08001899 map->btf_key_type_id = key_type_id;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001900 map->btf_value_type_id = bpf_map__is_internal(map) ?
1901 ret : value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001902 return 0;
1903}
1904
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001905int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1906{
1907 struct bpf_map_info info = {};
1908 __u32 len = sizeof(info);
1909 int new_fd, err;
1910 char *new_name;
1911
1912 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1913 if (err)
1914 return err;
1915
1916 new_name = strdup(info.name);
1917 if (!new_name)
1918 return -errno;
1919
1920 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1921 if (new_fd < 0)
1922 goto err_free_new_name;
1923
1924 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1925 if (new_fd < 0)
1926 goto err_close_new_fd;
1927
1928 err = zclose(map->fd);
1929 if (err)
1930 goto err_close_new_fd;
1931 free(map->name);
1932
1933 map->fd = new_fd;
1934 map->name = new_name;
1935 map->def.type = info.type;
1936 map->def.key_size = info.key_size;
1937 map->def.value_size = info.value_size;
1938 map->def.max_entries = info.max_entries;
1939 map->def.map_flags = info.map_flags;
1940 map->btf_key_type_id = info.btf_key_type_id;
1941 map->btf_value_type_id = info.btf_value_type_id;
1942
1943 return 0;
1944
1945err_close_new_fd:
1946 close(new_fd);
1947err_free_new_name:
1948 free(new_name);
1949 return -errno;
1950}
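
/*
 * Usage sketch (hypothetical, not part of libbpf): share one pinned map
 * across objects by reusing its fd before load. bpf_obj_get() comes from
 * bpf.h, which is already included; the pin path and map name are
 * assumptions.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "shared");
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/shared");
 *
 *	if (map && pin_fd >= 0)
 *		err = bpf_map__reuse_fd(map, pin_fd);
 *
 * A map with a preset fd is then skipped by bpf_object__create_maps()
 * below (see the "skip map create (preset)" branch).
 */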
1951
Andrey Ignatov1a11a4c2019-02-14 15:01:42 -08001952int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1953{
1954 if (!map || !max_entries)
1955 return -EINVAL;
1956
1957 /* If map already created, its attributes can't be changed. */
1958 if (map->fd >= 0)
1959 return -EBUSY;
1960
1961 map->def.max_entries = max_entries;
1962
1963 return 0;
1964}
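
/*
 * Usage sketch (hypothetical): resize a map between open and load, e.g.
 * to scale it with the number of possible CPUs. This must happen before
 * the map is created, since a live fd makes the call return -EBUSY:
 *
 *	err = bpf_map__resize(map, 4 * libbpf_num_possible_cpus());
 */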
1965
Wang Nan52d33522015-07-01 02:14:04 +00001966static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08001967bpf_object__probe_name(struct bpf_object *obj)
1968{
1969 struct bpf_load_program_attr attr;
1970 char *cp, errmsg[STRERR_BUFSIZE];
1971 struct bpf_insn insns[] = {
1972 BPF_MOV64_IMM(BPF_REG_0, 0),
1973 BPF_EXIT_INSN(),
1974 };
1975 int ret;
1976
1977 /* make sure basic loading works */
1978
1979 memset(&attr, 0, sizeof(attr));
1980 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1981 attr.insns = insns;
1982 attr.insns_cnt = ARRAY_SIZE(insns);
1983 attr.license = "GPL";
1984
1985 ret = bpf_load_program_xattr(&attr, NULL, 0);
1986 if (ret < 0) {
1987 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1988 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1989 __func__, cp, errno);
1990 return -errno;
1991 }
1992 close(ret);
1993
1994 /* now try the same program, but with the name */
1995
1996 attr.name = "test";
1997 ret = bpf_load_program_xattr(&attr, NULL, 0);
1998 if (ret >= 0) {
1999 obj->caps.name = 1;
2000 close(ret);
2001 }
2002
2003 return 0;
2004}
2005
2006static int
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002007bpf_object__probe_global_data(struct bpf_object *obj)
2008{
2009 struct bpf_load_program_attr prg_attr;
2010 struct bpf_create_map_attr map_attr;
2011 char *cp, errmsg[STRERR_BUFSIZE];
2012 struct bpf_insn insns[] = {
2013 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
2014 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
2015 BPF_MOV64_IMM(BPF_REG_0, 0),
2016 BPF_EXIT_INSN(),
2017 };
2018 int ret, map;
2019
2020 memset(&map_attr, 0, sizeof(map_attr));
2021 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
2022 map_attr.key_size = sizeof(int);
2023 map_attr.value_size = 32;
2024 map_attr.max_entries = 1;
2025
2026 map = bpf_create_map_xattr(&map_attr);
2027 if (map < 0) {
2028 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2029 pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
2030 __func__, cp, errno);
2031 return -errno;
2032 }
2033
2034 insns[0].imm = map;
2035
2036 memset(&prg_attr, 0, sizeof(prg_attr));
2037 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
2038 prg_attr.insns = insns;
2039 prg_attr.insns_cnt = ARRAY_SIZE(insns);
2040 prg_attr.license = "GPL";
2041
2042 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
2043 if (ret >= 0) {
2044 obj->caps.global_data = 1;
2045 close(ret);
2046 }
2047
2048 close(map);
2049 return 0;
2050}
2051
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002052static int bpf_object__probe_btf_func(struct bpf_object *obj)
2053{
2054 const char strs[] = "\0int\0x\0a";
2055 /* void x(int a) {} */
2056 __u32 types[] = {
2057 /* int */
2058 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2059 /* FUNC_PROTO */ /* [2] */
2060 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
2061 BTF_PARAM_ENC(7, 1),
2062 /* FUNC x */ /* [3] */
2063 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
2064 };
Michal Rosteckicfd49212019-05-29 20:31:09 +02002065 int btf_fd;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002066
Michal Rosteckicfd49212019-05-29 20:31:09 +02002067 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2068 strs, sizeof(strs));
2069 if (btf_fd >= 0) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002070 obj->caps.btf_func = 1;
Michal Rosteckicfd49212019-05-29 20:31:09 +02002071 close(btf_fd);
2072 return 1;
2073 }
2074
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002075 return 0;
2076}
2077
2078static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
2079{
2080 const char strs[] = "\0x\0.data";
2081 /* static int a; */
2082 __u32 types[] = {
2083 /* int */
2084 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2085 /* VAR x */ /* [2] */
2086 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
2087 BTF_VAR_STATIC,
2088 /* DATASEC val */ /* [3] */
2089 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
2090 BTF_VAR_SECINFO_ENC(2, 0, 4),
2091 };
Michal Rosteckicfd49212019-05-29 20:31:09 +02002092 int btf_fd;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002093
Michal Rosteckicfd49212019-05-29 20:31:09 +02002094 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2095 strs, sizeof(strs));
2096 if (btf_fd >= 0) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002097 obj->caps.btf_datasec = 1;
Michal Rosteckicfd49212019-05-29 20:31:09 +02002098 close(btf_fd);
2099 return 1;
2100 }
2101
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002102 return 0;
2103}
2104
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002105static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002106bpf_object__probe_caps(struct bpf_object *obj)
2107{
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002108 int (*probe_fn[])(struct bpf_object *obj) = {
2109 bpf_object__probe_name,
2110 bpf_object__probe_global_data,
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002111 bpf_object__probe_btf_func,
2112 bpf_object__probe_btf_datasec,
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002113 };
2114 int i, ret;
2115
2116 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
2117 ret = probe_fn[i](obj);
2118 if (ret < 0)
Stanislav Fomichev15ea1642019-05-14 20:38:49 -07002119 pr_debug("Probe #%d failed with %d.\n", i, ret);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002120 }
2121
2122 return 0;
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002123}
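
/*
 * Note on the probing pattern above: each probe builds the smallest
 * kernel object that exercises the feature in question (a named program,
 * a map-backed global data load, a raw BTF blob with FUNC or DATASEC)
 * and records success in obj->caps; failures are expected on older
 * kernels and are therefore only logged at debug level.
 */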
2124
2125static int
Daniel Borkmannd8599002019-04-09 23:20:13 +02002126bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
2127{
2128 char *cp, errmsg[STRERR_BUFSIZE];
2129 int err, zero = 0;
2130 __u8 *data;
2131
2132 /* Nothing to do here since kernel already zero-initializes .bss map. */
2133 if (map->libbpf_type == LIBBPF_MAP_BSS)
2134 return 0;
2135
2136 data = map->libbpf_type == LIBBPF_MAP_DATA ?
2137 obj->sections.data : obj->sections.rodata;
2138
2139 err = bpf_map_update_elem(map->fd, &zero, data, 0);
2140 /* Freeze .rodata map as read-only from syscall side. */
2141 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
2142 err = bpf_map_freeze(map->fd);
2143 if (err) {
2144 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2145 pr_warning("Error freezing map(%s) as read-only: %s\n",
2146 map->name, cp);
2147 err = 0;
2148 }
2149 }
2150 return err;
2151}
2152
2153static int
Wang Nan52d33522015-07-01 02:14:04 +00002154bpf_object__create_maps(struct bpf_object *obj)
2155{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002156 struct bpf_create_map_attr create_attr = {};
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002157 int nr_cpus = 0;
Wang Nan52d33522015-07-01 02:14:04 +00002158 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002159 int err;
Wang Nan52d33522015-07-01 02:14:04 +00002160
Wang Nan9d759a92015-11-27 08:47:35 +00002161 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002162 struct bpf_map *map = &obj->maps[i];
2163 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002164 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002165 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00002166
Jakub Kicinski26736eb2018-07-10 14:43:06 -07002167 if (map->fd >= 0) {
2168 pr_debug("skip map create (preset) %s: fd=%d\n",
2169 map->name, map->fd);
2170 continue;
2171 }
2172
Stanislav Fomichev94cb3102018-11-20 17:11:20 -08002173 if (obj->caps.name)
2174 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07002175 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002176 create_attr.map_type = def->type;
2177 create_attr.map_flags = def->map_flags;
2178 create_attr.key_size = def->key_size;
2179 create_attr.value_size = def->value_size;
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002180 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
2181 !def->max_entries) {
2182 if (!nr_cpus)
2183 nr_cpus = libbpf_num_possible_cpus();
2184 if (nr_cpus < 0) {
2185 pr_warning("failed to determine number of system CPUs: %d\n",
2186 nr_cpus);
2187 err = nr_cpus;
2188 goto err_out;
2189 }
2190 pr_debug("map '%s': setting size to %d\n",
2191 map->name, nr_cpus);
2192 create_attr.max_entries = nr_cpus;
2193 } else {
2194 create_attr.max_entries = def->max_entries;
2195 }
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002196 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002197 create_attr.btf_key_type_id = 0;
2198 create_attr.btf_value_type_id = 0;
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08002199 if (bpf_map_type__is_map_in_map(def->type) &&
2200 map->inner_map_fd >= 0)
2201 create_attr.inner_map_fd = map->inner_map_fd;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002202
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002203 if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002204 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002205 create_attr.btf_key_type_id = map->btf_key_type_id;
2206 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002207 }
2208
2209 *pfd = bpf_create_map_xattr(&create_attr);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002210 if (*pfd < 0 && (create_attr.btf_key_type_id ||
2211 create_attr.btf_value_type_id)) {
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002212 err = -errno;
2213 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002214 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002215 map->name, cp, err);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002216 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002217 create_attr.btf_key_type_id = 0;
2218 create_attr.btf_value_type_id = 0;
2219 map->btf_key_type_id = 0;
2220 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002221 *pfd = bpf_create_map_xattr(&create_attr);
2222 }
2223
Wang Nan52d33522015-07-01 02:14:04 +00002224 if (*pfd < 0) {
2225 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00002226
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002227 err = -errno;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002228err_out:
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002229 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2230 pr_warning("failed to create map (name: '%s'): %s(%d)\n",
2231 map->name, cp, err);
Wang Nan52d33522015-07-01 02:14:04 +00002232 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00002233 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00002234 return err;
2235 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02002236
2237 if (bpf_map__is_internal(map)) {
2238 err = bpf_object__populate_internal_map(obj, map);
2239 if (err < 0) {
2240 zclose(*pfd);
2241 goto err_out;
2242 }
2243 }
2244
Andrii Nakryiko76e10222019-05-29 10:36:10 -07002245 pr_debug("created map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00002246 }
2247
Wang Nan52d33522015-07-01 02:14:04 +00002248 return 0;
2249}
2250
Wang Nan8a47a6c2015-07-01 02:14:05 +00002251static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002252check_btf_ext_reloc_err(struct bpf_program *prog, int err,
2253 void *btf_prog_info, const char *info_name)
2254{
2255 if (err != -ENOENT) {
2256 pr_warning("Error in loading %s for sec %s.\n",
2257 info_name, prog->section_name);
2258 return err;
2259 }
2260
2261 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
2262
2263 if (btf_prog_info) {
2264 /*
2265 * Some info has already been found but has problem
Andrii Nakryiko399dc652019-05-29 10:36:11 -07002266 * in the last btf_ext reloc. We must error out.
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002267 */
2268 pr_warning("Error in relocating %s for sec %s.\n",
2269 info_name, prog->section_name);
2270 return err;
2271 }
2272
Andrii Nakryiko399dc652019-05-29 10:36:11 -07002273 /* Have problem loading the very first info. Ignore the rest. */
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002274 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
2275 info_name, prog->section_name, info_name);
2276 return 0;
2277}
2278
2279static int
2280bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2281 const char *section_name, __u32 insn_offset)
2282{
2283 int err;
2284
2285 if (!insn_offset || prog->func_info) {
2286 /*
2287 * !insn_offset => main program
2288 *
2289 * For sub prog, the main program's func_info has to
2290 * be loaded first (i.e. prog->func_info != NULL)
2291 */
2292 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
2293 section_name, insn_offset,
2294 &prog->func_info,
2295 &prog->func_info_cnt);
2296 if (err)
2297 return check_btf_ext_reloc_err(prog, err,
2298 prog->func_info,
2299 "bpf_func_info");
2300
2301 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
2302 }
2303
Martin KaFai Lau3d650142018-12-07 16:42:31 -08002304 if (!insn_offset || prog->line_info) {
2305 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
2306 section_name, insn_offset,
2307 &prog->line_info,
2308 &prog->line_info_cnt);
2309 if (err)
2310 return check_btf_ext_reloc_err(prog, err,
2311 prog->line_info,
2312 "bpf_line_info");
2313
2314 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2315 }
2316
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002317 return 0;
2318}
2319
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002320#define BPF_CORE_SPEC_MAX_LEN 64
2321
2322/* represents BPF CO-RE field or array element accessor */
2323struct bpf_core_accessor {
2324 __u32 type_id; /* struct/union type or array element type */
2325 __u32 idx; /* field index or array index */
2326 const char *name; /* field name or NULL for array accessor */
2327};
2328
2329struct bpf_core_spec {
2330 const struct btf *btf;
2331 /* high-level spec: named fields and array indices only */
2332 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
2333 /* high-level spec length */
2334 int len;
2335 /* raw, low-level spec: 1-to-1 with accessor spec string */
2336 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
2337 /* raw spec length */
2338 int raw_len;
2339 /* field byte offset represented by spec */
2340 __u32 offset;
2341};
2342
2343static bool str_is_empty(const char *s)
2344{
2345 return !s || !s[0];
2346}
2347
2348/*
Andrii Nakryiko511bb002019-10-15 11:28:45 -07002349 * Turn bpf_field_reloc into a low- and high-level spec representation,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002350 * validating correctness along the way, as well as calculating resulting
2351 * field offset (in bytes), specified by accessor string. Low-level spec
2352 * captures every single level of nestedness, including traversing anonymous
2353 * struct/union members. High-level one only captures semantically meaningful
2354 * "turning points": named fields and array indicies.
2355 * E.g., for this case:
2356 *
2357 * struct sample {
2358 * int __unimportant;
2359 * struct {
2360 * int __1;
2361 * int __2;
2362 * int a[7];
2363 * };
2364 * };
2365 *
2366 * struct sample *s = ...;
2367 *
2368 * int x = &s->a[3]; // access string = '0:1:2:3'
2369 *
2370 * Low-level spec has 1:1 mapping with each element of access string (it's
2371 * just a parsed access string representation): [0, 1, 2, 3].
2372 *
2373 * High-level spec will capture only 3 points:
2374 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
2375 * - field 'a' access (corresponds to '2' in low-level spec);
2376 * - array element #3 access (corresponds to '3' in low-level spec).
2377 *
2378 */
2379static int bpf_core_spec_parse(const struct btf *btf,
2380 __u32 type_id,
2381 const char *spec_str,
2382 struct bpf_core_spec *spec)
2383{
2384 int access_idx, parsed_len, i;
2385 const struct btf_type *t;
2386 const char *name;
2387 __u32 id;
2388 __s64 sz;
2389
2390 if (str_is_empty(spec_str) || *spec_str == ':')
2391 return -EINVAL;
2392
2393 memset(spec, 0, sizeof(*spec));
2394 spec->btf = btf;
2395
2396 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
2397 while (*spec_str) {
2398 if (*spec_str == ':')
2399 ++spec_str;
2400 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
2401 return -EINVAL;
2402 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2403 return -E2BIG;
2404 spec_str += parsed_len;
2405 spec->raw_spec[spec->raw_len++] = access_idx;
2406 }
2407
2408 if (spec->raw_len == 0)
2409 return -EINVAL;
2410
2411 /* first spec value is always reloc type array index */
2412 t = skip_mods_and_typedefs(btf, type_id, &id);
2413 if (!t)
2414 return -EINVAL;
2415
2416 access_idx = spec->raw_spec[0];
2417 spec->spec[0].type_id = id;
2418 spec->spec[0].idx = access_idx;
2419 spec->len++;
2420
2421 sz = btf__resolve_size(btf, id);
2422 if (sz < 0)
2423 return sz;
2424 spec->offset = access_idx * sz;
2425
2426 for (i = 1; i < spec->raw_len; i++) {
2427 t = skip_mods_and_typedefs(btf, id, &id);
2428 if (!t)
2429 return -EINVAL;
2430
2431 access_idx = spec->raw_spec[i];
2432
2433 if (btf_is_composite(t)) {
2434 const struct btf_member *m;
2435 __u32 offset;
2436
2437 if (access_idx >= btf_vlen(t))
2438 return -EINVAL;
2439 if (btf_member_bitfield_size(t, access_idx))
2440 return -EINVAL;
2441
2442 offset = btf_member_bit_offset(t, access_idx);
2443 if (offset % 8)
2444 return -EINVAL;
2445 spec->offset += offset / 8;
2446
2447 m = btf_members(t) + access_idx;
2448 if (m->name_off) {
2449 name = btf__name_by_offset(btf, m->name_off);
2450 if (str_is_empty(name))
2451 return -EINVAL;
2452
2453 spec->spec[spec->len].type_id = id;
2454 spec->spec[spec->len].idx = access_idx;
2455 spec->spec[spec->len].name = name;
2456 spec->len++;
2457 }
2458
2459 id = m->type;
2460 } else if (btf_is_array(t)) {
2461 const struct btf_array *a = btf_array(t);
2462
2463 t = skip_mods_and_typedefs(btf, a->type, &id);
2464 if (!t || access_idx >= a->nelems)
2465 return -EINVAL;
2466
2467 spec->spec[spec->len].type_id = id;
2468 spec->spec[spec->len].idx = access_idx;
2469 spec->len++;
2470
2471 sz = btf__resolve_size(btf, id);
2472 if (sz < 0)
2473 return sz;
2474 spec->offset += access_idx * sz;
2475 } else {
2476 pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
2477 type_id, spec_str, i, id, btf_kind(t));
2478 return -EINVAL;
2479 }
2480 }
2481
2482 return 0;
2483}
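
/*
 * Worked example (illustrative), using `struct sample` from the comment
 * above: parsing access string "0:1:2:3" against `struct sample` yields
 *
 *	raw_spec = [0, 1, 2, 3], raw_len = 4
 *	spec     = [ptr deref [0], field 'a', array idx [3]], len = 3
 *	offset   = 0 + 4 + 8 + 3 * 4 = 24 bytes
 *
 * assuming 4-byte ints: 4 for the anonymous struct, 8 for 'a' within it,
 * 12 for the third array element. The anonymous-struct hop shows up only
 * in raw_spec, never in the high-level spec.
 */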
2484
2485static bool bpf_core_is_flavor_sep(const char *s)
2486{
2487 /* check X___Y name pattern, where X and Y are not underscores */
2488 return s[0] != '_' && /* X */
2489 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
2490 s[4] != '_'; /* Y */
2491}
2492
2493/* Given 'some_struct_name___with_flavor' return the length of a name prefix
2494 * before last triple underscore. Struct name part after last triple
2495 * underscore is ignored by BPF CO-RE relocation during relocation matching.
2496 */
2497static size_t bpf_core_essential_name_len(const char *name)
2498{
2499 size_t n = strlen(name);
2500 int i;
2501
2502 for (i = n - 5; i >= 0; i--) {
2503 if (bpf_core_is_flavor_sep(name + i))
2504 return i + 1;
2505 }
2506 return n;
2507}
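
/*
 * Example (illustrative): bpf_core_essential_name_len("task_struct___2")
 * returns 11, the length of "task_struct", while a name with no triple
 * underscore, e.g. "sk_buff", returns its full strlen().
 */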
2508
2509/* dynamically sized list of type IDs */
2510struct ids_vec {
2511 __u32 *data;
2512 int len;
2513};
2514
2515static void bpf_core_free_cands(struct ids_vec *cand_ids)
2516{
2517 free(cand_ids->data);
2518 free(cand_ids);
2519}
2520
2521static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
2522 __u32 local_type_id,
2523 const struct btf *targ_btf)
2524{
2525 size_t local_essent_len, targ_essent_len;
2526 const char *local_name, *targ_name;
2527 const struct btf_type *t;
2528 struct ids_vec *cand_ids;
2529 __u32 *new_ids;
2530 int i, err, n;
2531
2532 t = btf__type_by_id(local_btf, local_type_id);
2533 if (!t)
2534 return ERR_PTR(-EINVAL);
2535
2536 local_name = btf__name_by_offset(local_btf, t->name_off);
2537 if (str_is_empty(local_name))
2538 return ERR_PTR(-EINVAL);
2539 local_essent_len = bpf_core_essential_name_len(local_name);
2540
2541 cand_ids = calloc(1, sizeof(*cand_ids));
2542 if (!cand_ids)
2543 return ERR_PTR(-ENOMEM);
2544
2545 n = btf__get_nr_types(targ_btf);
2546 for (i = 1; i <= n; i++) {
2547 t = btf__type_by_id(targ_btf, i);
2548 targ_name = btf__name_by_offset(targ_btf, t->name_off);
2549 if (str_is_empty(targ_name))
2550 continue;
2551
2552 targ_essent_len = bpf_core_essential_name_len(targ_name);
2553 if (targ_essent_len != local_essent_len)
2554 continue;
2555
2556 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
2557 pr_debug("[%d] %s: found candidate [%d] %s\n",
2558 local_type_id, local_name, i, targ_name);
2559 new_ids = reallocarray(cand_ids->data, cand_ids->len + 1, sizeof(*cand_ids->data));
2560 if (!new_ids) {
2561 err = -ENOMEM;
2562 goto err_out;
2563 }
2564 cand_ids->data = new_ids;
2565 cand_ids->data[cand_ids->len++] = i;
2566 }
2567 }
2568 return cand_ids;
2569err_out:
2570 bpf_core_free_cands(cand_ids);
2571 return ERR_PTR(err);
2572}
2573
2574/* Check two types for compatibility, skipping const/volatile/restrict and
2575 * typedefs, to ensure we are relocating offset to the compatible entities:
2576 * - any two STRUCTs/UNIONs are compatible and can be mixed;
2577 * - any two FWDs are compatible;
2578 * - any two PTRs are always compatible;
2579 * - for ENUMs, check sizes, names are ignored;
2580 * - for INT, size and bitness should match, signedness is ignored;
2581 * - for ARRAY, dimensionality is ignored, element types are checked for
2582 * compatibility recursively;
2583 * - everything else shouldn't be ever a target of relocation.
2584 * These rules are not set in stone and probably will be adjusted as we get
2585 * more experience with using BPF CO-RE relocations.
2586 */
2587static int bpf_core_fields_are_compat(const struct btf *local_btf,
2588 __u32 local_id,
2589 const struct btf *targ_btf,
2590 __u32 targ_id)
2591{
2592 const struct btf_type *local_type, *targ_type;
2593
2594recur:
2595 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
2596 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2597 if (!local_type || !targ_type)
2598 return -EINVAL;
2599
2600 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
2601 return 1;
2602 if (btf_kind(local_type) != btf_kind(targ_type))
2603 return 0;
2604
2605 switch (btf_kind(local_type)) {
2606 case BTF_KIND_FWD:
2607 case BTF_KIND_PTR:
2608 return 1;
2609 case BTF_KIND_ENUM:
2610 return local_type->size == targ_type->size;
2611 case BTF_KIND_INT:
2612 return btf_int_offset(local_type) == 0 &&
2613 btf_int_offset(targ_type) == 0 &&
2614 local_type->size == targ_type->size &&
2615 btf_int_bits(local_type) == btf_int_bits(targ_type);
2616 case BTF_KIND_ARRAY:
2617 local_id = btf_array(local_type)->type;
2618 targ_id = btf_array(targ_type)->type;
2619 goto recur;
2620 default:
2621 pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
2622 btf_kind(local_type), local_id, targ_id);
2623 return 0;
2624 }
2625}
2626
2627/*
2628 * Given single high-level named field accessor in local type, find
2629 * corresponding high-level accessor for a target type. Along the way,
2630 * maintain low-level spec for target as well. Also keep updating target
2631 * offset.
2632 *
2633 * Searching is performed through recursive exhaustive enumeration of all
2634 * fields of a struct/union. If there are any anonymous (embedded)
2635 * structs/unions, they are recursively searched as well. If field with
2636 * desired name is found, check compatibility between local and target types,
2637 * before returning result.
2638 *
2639 * 1 is returned, if field is found.
2640 * 0 is returned if no compatible field is found.
2641 * <0 is returned on error.
2642 */
2643static int bpf_core_match_member(const struct btf *local_btf,
2644 const struct bpf_core_accessor *local_acc,
2645 const struct btf *targ_btf,
2646 __u32 targ_id,
2647 struct bpf_core_spec *spec,
2648 __u32 *next_targ_id)
2649{
2650 const struct btf_type *local_type, *targ_type;
2651 const struct btf_member *local_member, *m;
2652 const char *local_name, *targ_name;
2653 __u32 local_id;
2654 int i, n, found;
2655
2656 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2657 if (!targ_type)
2658 return -EINVAL;
2659 if (!btf_is_composite(targ_type))
2660 return 0;
2661
2662 local_id = local_acc->type_id;
2663 local_type = btf__type_by_id(local_btf, local_id);
2664 local_member = btf_members(local_type) + local_acc->idx;
2665 local_name = btf__name_by_offset(local_btf, local_member->name_off);
2666
2667 n = btf_vlen(targ_type);
2668 m = btf_members(targ_type);
2669 for (i = 0; i < n; i++, m++) {
2670 __u32 offset;
2671
2672 /* bitfield relocations not supported */
2673 if (btf_member_bitfield_size(targ_type, i))
2674 continue;
2675 offset = btf_member_bit_offset(targ_type, i);
2676 if (offset % 8)
2677 continue;
2678
2679 /* too deep struct/union/array nesting */
2680 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2681 return -E2BIG;
2682
2683 /* speculate this member will be the good one */
2684 spec->offset += offset / 8;
2685 spec->raw_spec[spec->raw_len++] = i;
2686
2687 targ_name = btf__name_by_offset(targ_btf, m->name_off);
2688 if (str_is_empty(targ_name)) {
2689 /* embedded struct/union, we need to go deeper */
2690 found = bpf_core_match_member(local_btf, local_acc,
2691 targ_btf, m->type,
2692 spec, next_targ_id);
2693 if (found) /* either found or error */
2694 return found;
2695 } else if (strcmp(local_name, targ_name) == 0) {
2696 /* matching named field */
2697 struct bpf_core_accessor *targ_acc;
2698
2699 targ_acc = &spec->spec[spec->len++];
2700 targ_acc->type_id = targ_id;
2701 targ_acc->idx = i;
2702 targ_acc->name = targ_name;
2703
2704 *next_targ_id = m->type;
2705 found = bpf_core_fields_are_compat(local_btf,
2706 local_member->type,
2707 targ_btf, m->type);
2708 if (!found)
2709 spec->len--; /* pop accessor */
2710 return found;
2711 }
2712 /* member turned out not to be what we looked for */
2713 spec->offset -= offset / 8;
2714 spec->raw_len--;
2715 }
2716
2717 return 0;
2718}
2719
2720/*
2721 * Try to match local spec to a target type and, if successful, produce full
2722 * target spec (high-level, low-level + offset).
2723 */
2724static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
2725 const struct btf *targ_btf, __u32 targ_id,
2726 struct bpf_core_spec *targ_spec)
2727{
2728 const struct btf_type *targ_type;
2729 const struct bpf_core_accessor *local_acc;
2730 struct bpf_core_accessor *targ_acc;
2731 int i, sz, matched;
2732
2733 memset(targ_spec, 0, sizeof(*targ_spec));
2734 targ_spec->btf = targ_btf;
2735
2736 local_acc = &local_spec->spec[0];
2737 targ_acc = &targ_spec->spec[0];
2738
2739 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
2740 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
2741 &targ_id);
2742 if (!targ_type)
2743 return -EINVAL;
2744
2745 if (local_acc->name) {
2746 matched = bpf_core_match_member(local_spec->btf,
2747 local_acc,
2748 targ_btf, targ_id,
2749 targ_spec, &targ_id);
2750 if (matched <= 0)
2751 return matched;
2752 } else {
2753 /* for i=0, targ_id is already treated as array element
2754 * type (because it's the original struct), for others
2755 * we should find array element type first
2756 */
2757 if (i > 0) {
2758 const struct btf_array *a;
2759
2760 if (!btf_is_array(targ_type))
2761 return 0;
2762
2763 a = btf_array(targ_type);
2764 if (local_acc->idx >= a->nelems)
2765 return 0;
2766 if (!skip_mods_and_typedefs(targ_btf, a->type,
2767 &targ_id))
2768 return -EINVAL;
2769 }
2770
2771 /* too deep struct/union/array nesting */
2772 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2773 return -E2BIG;
2774
2775 targ_acc->type_id = targ_id;
2776 targ_acc->idx = local_acc->idx;
2777 targ_acc->name = NULL;
2778 targ_spec->len++;
2779 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
2780 targ_spec->raw_len++;
2781
2782 sz = btf__resolve_size(targ_btf, targ_id);
2783 if (sz < 0)
2784 return sz;
2785 targ_spec->offset += local_acc->idx * sz;
2786 }
2787 }
2788
2789 return 1;
2790}
2791
2792/*
2793 * Patch relocatable BPF instruction.
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002794 *
2795 * Patched value is determined by relocation kind and target specification.
2796 * For field existence relocation target spec will be NULL if field is not
2797 * found.
2798 * Expected insn->imm value is determined using relocation kind and local
2799 * spec, and is checked before patching instruction. If actual insn->imm value
2800 * is wrong, bail out with error.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002801 *
2802 * Currently two kinds of BPF instructions are supported:
2803 * 1. rX = <imm> (assignment with immediate operand);
2804 * 2. rX += <imm> (arithmetic operations with immediate operand);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002805 */
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002806static int bpf_core_reloc_insn(struct bpf_program *prog,
2807 const struct bpf_field_reloc *relo,
2808 const struct bpf_core_spec *local_spec,
2809 const struct bpf_core_spec *targ_spec)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002810{
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002811 __u32 orig_val, new_val;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002812 struct bpf_insn *insn;
2813 int insn_idx;
2814 __u8 class;
2815
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002816 if (relo->insn_off % sizeof(struct bpf_insn))
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002817 return -EINVAL;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002818 insn_idx = relo->insn_off / sizeof(struct bpf_insn);
2819
2820 switch (relo->kind) {
2821 case BPF_FIELD_BYTE_OFFSET:
2822 orig_val = local_spec->offset;
2823 if (targ_spec) {
2824 new_val = targ_spec->offset;
2825 } else {
2826 pr_warning("prog '%s': patching insn #%d w/ failed reloc, imm %d -> %d\n",
2827 bpf_program__title(prog, false), insn_idx,
2828 orig_val, -1);
2829 new_val = (__u32)-1;
2830 }
2831 break;
2832 case BPF_FIELD_EXISTS:
2833 orig_val = 1; /* can't generate EXISTS relo w/o local field */
2834 new_val = targ_spec ? 1 : 0;
2835 break;
2836 default:
2837 pr_warning("prog '%s': unknown relo %d at insn #%d'\n",
2838 bpf_program__title(prog, false),
2839 relo->kind, insn_idx);
2840 return -EINVAL;
2841 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002842
2843 insn = &prog->insns[insn_idx];
2844 class = BPF_CLASS(insn->code);
2845
2846 if (class == BPF_ALU || class == BPF_ALU64) {
2847 if (BPF_SRC(insn->code) != BPF_K)
2848 return -EINVAL;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002849 if (insn->imm != orig_val)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002850 return -EINVAL;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002851 insn->imm = new_val;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002852 pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
2853 bpf_program__title(prog, false),
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002854 insn_idx, orig_val, new_val);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002855 } else {
2856 pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
2857 bpf_program__title(prog, false),
2858 insn_idx, insn->code, insn->src_reg, insn->dst_reg,
2859 insn->off, insn->imm);
2860 return -EINVAL;
2861 }
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07002862
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002863 return 0;
2864}
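
/*
 * Illustration (a sketch, not literal libbpf output): a
 * BPF_FIELD_BYTE_OFFSET relocation compiled against local offset 16,
 * whose target kernel struct keeps the field at offset 24, rewrites
 *
 *	r1 += 16	; insn->imm == local_spec->offset
 * into
 *	r1 += 24	; insn->imm  = targ_spec->offset
 *
 * while a BPF_FIELD_EXISTS relocation patches the compile-time constant
 * 1 into 1 or 0, depending on whether the field was matched at all.
 */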
2865
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07002866static struct btf *btf_load_raw(const char *path)
2867{
2868 struct btf *btf;
2869 size_t read_cnt;
2870 struct stat st;
2871 void *data;
2872 FILE *f;
2873
2874 if (stat(path, &st))
2875 return ERR_PTR(-errno);
2876
2877 data = malloc(st.st_size);
2878 if (!data)
2879 return ERR_PTR(-ENOMEM);
2880
2881 f = fopen(path, "rb");
2882 if (!f) {
2883 btf = ERR_PTR(-errno);
2884 goto cleanup;
2885 }
2886
2887 read_cnt = fread(data, 1, st.st_size, f);
2888 fclose(f);
2889 if (read_cnt < st.st_size) {
2890 btf = ERR_PTR(-EBADF);
2891 goto cleanup;
2892 }
2893
2894 btf = btf__new(data, read_cnt);
2895
2896cleanup:
2897 free(data);
2898 return btf;
2899}
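
/*
 * Usage sketch (hypothetical): loading the canonical sysfs image
 * directly, which is also the first location bpf_core_find_kernel_btf()
 * tries below:
 *
 *	struct btf *btf = btf_load_raw("/sys/kernel/btf/vmlinux");
 *
 *	if (IS_ERR(btf))
 *		... no raw kernel BTF at that path ...
 */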
2900
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002901/*
2902 * Probe few well-known locations for vmlinux kernel image and try to load BTF
2903 * data out of it to use for target BTF.
2904 */
2905static struct btf *bpf_core_find_kernel_btf(void)
2906{
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07002907 struct {
2908 const char *path_fmt;
2909 bool raw_btf;
2910 } locations[] = {
2911 /* try canonical vmlinux BTF through sysfs first */
2912 { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
2913 /* fall back to trying to find vmlinux ELF on disk otherwise */
2914 { "/boot/vmlinux-%1$s" },
2915 { "/lib/modules/%1$s/vmlinux-%1$s" },
2916 { "/lib/modules/%1$s/build/vmlinux" },
2917 { "/usr/lib/modules/%1$s/kernel/vmlinux" },
2918 { "/usr/lib/debug/boot/vmlinux-%1$s" },
2919 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
2920 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002921 };
2922 char path[PATH_MAX + 1];
2923 struct utsname buf;
2924 struct btf *btf;
2925 int i;
2926
2927 uname(&buf);
2928
2929 for (i = 0; i < ARRAY_SIZE(locations); i++) {
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07002930 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002931
2932 if (access(path, R_OK))
2933 continue;
2934
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07002935 if (locations[i].raw_btf)
2936 btf = btf_load_raw(path);
2937 else
2938 btf = btf__parse_elf(path, NULL);
2939
2940 pr_debug("loading kernel BTF '%s': %ld\n",
2941 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002942 if (IS_ERR(btf))
2943 continue;
2944
2945 return btf;
2946 }
2947
2948 pr_warning("failed to find valid kernel BTF\n");
2949 return ERR_PTR(-ESRCH);
2950}
2951
2952/* Output spec definition in the format:
2953 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
2954 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
2955 */
2956static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
2957{
2958 const struct btf_type *t;
2959 const char *s;
2960 __u32 type_id;
2961 int i;
2962
2963 type_id = spec->spec[0].type_id;
2964 t = btf__type_by_id(spec->btf, type_id);
2965 s = btf__name_by_offset(spec->btf, t->name_off);
2966 libbpf_print(level, "[%u] %s + ", type_id, s);
2967
2968 for (i = 0; i < spec->raw_len; i++)
2969 libbpf_print(level, "%d%s", spec->raw_spec[i],
2970 i == spec->raw_len - 1 ? " => " : ":");
2971
2972 libbpf_print(level, "%u @ &x", spec->offset);
2973
2974 for (i = 0; i < spec->len; i++) {
2975 if (spec->spec[i].name)
2976 libbpf_print(level, ".%s", spec->spec[i].name);
2977 else
2978 libbpf_print(level, "[%u]", spec->spec[i].idx);
2979 }
2980
2981}
2982
2983static size_t bpf_core_hash_fn(const void *key, void *ctx)
2984{
2985 return (size_t)key;
2986}
2987
2988static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
2989{
2990 return k1 == k2;
2991}
2992
2993static void *u32_as_hash_key(__u32 x)
2994{
2995 return (void *)(uintptr_t)x;
2996}
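
/* These three callbacks make the candidate cache below an identity map
 * from a local type ID to its struct ids_vec of candidates. A sketch of
 * the round-trip (42 is an arbitrary example type ID):
 *
 *	struct hashmap *m = hashmap__new(bpf_core_hash_fn,
 *					 bpf_core_equal_fn, NULL);
 *
 *	hashmap__set(m, u32_as_hash_key(42), cand_ids, NULL, NULL);
 *	hashmap__find(m, u32_as_hash_key(42), (void **)&cand_ids);
 */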
2997
2998/*
2999 * CO-RE relocate single instruction.
3000 *
3001 * The outline and important points of the algorithm:
3002 * 1. For given local type, find corresponding candidate target types.
3003 * Candidate type is a type with the same "essential" name, ignoring
3004 * everything after last triple underscore (___). E.g., `sample`,
3005 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
3006 * for each other. Names with triple underscore are referred to as
3007 *    for each other. Names with triple underscore are referred to as
3008 *    "flavors" and are useful, among other things, for specifying/supporting
3009 *    incompatible variations of the same kernel struct, which
3010 * configurations.
3011 *
3012 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
3013 * converter, when deduplicated BTF of a kernel still contains more than
3014 *    one type with the same name. In that case, ___2, ___3, etc. are
3015 *    appended starting from the second name conflict. But struct flavors are
3016 *    also useful when defined "locally", in a BPF program, to extract the same
3017 *    data despite incompatible changes between different kernel
3018 * versions/configurations. For instance, to handle field renames between
3019 * kernel versions, one can use two flavors of the struct name with the
3020 * same common name and use conditional relocations to extract that field,
3021 *    depending on the target kernel version; a flavor sketch follows bpf_core_reloc_field() below.
3022 * 2. For each candidate type, try to match local specification to this
3023 * candidate target type. Matching involves finding corresponding
3024 * high-level spec accessors, meaning that all named fields should match,
3025 *    and all array accesses must be within the actual bounds. Also,
3026 * types should be compatible (see bpf_core_fields_are_compat for details).
3027 * 3. It is supported and expected that there might be multiple flavors
3028 * matching the spec. As long as all the specs resolve to the same set of
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003029 * offsets across all candidates, there is no error. If there is any
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003030 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
3031 *    imperfections of BTF deduplication, which can cause slight duplication of
3032 * the same BTF type, if some directly or indirectly referenced (by
3033 * pointer) type gets resolved to different actual types in different
3034 *    object files. If such a situation occurs, deduplicated BTF will end up
3035 * with two (or more) structurally identical types, which differ only in
3036 * types they refer to through pointer. This should be OK in most cases and
3037 * is not an error.
3038 * 4. The candidate type search is performed by linearly scanning through all
3039 * types in target BTF. It is anticipated that this is overall more
3040 * efficient memory-wise and not significantly worse (if not better)
3041 * CPU-wise compared to prebuilding a map from all local type names to
3042 *    a list of candidate type names. It's also sped up by caching the resolved
3043 *    list of matching candidates for each local "root" type ID that has at
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003044 * least one bpf_field_reloc associated with it. This list is shared
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003045 * between multiple relocations for the same type ID and is updated as some
3046 * of the candidates are pruned due to structural incompatibility.
3047 */
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003048static int bpf_core_reloc_field(struct bpf_program *prog,
3049 const struct bpf_field_reloc *relo,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003050 int relo_idx,
3051 const struct btf *local_btf,
3052 const struct btf *targ_btf,
3053 struct hashmap *cand_cache)
3054{
3055 const char *prog_name = bpf_program__title(prog, false);
3056 struct bpf_core_spec local_spec, cand_spec, targ_spec;
3057 const void *type_key = u32_as_hash_key(relo->type_id);
3058 const struct btf_type *local_type, *cand_type;
3059 const char *local_name, *cand_name;
3060 struct ids_vec *cand_ids;
3061 __u32 local_id, cand_id;
3062 const char *spec_str;
3063 int i, j, err;
3064
3065 local_id = relo->type_id;
3066 local_type = btf__type_by_id(local_btf, local_id);
3067 if (!local_type)
3068 return -EINVAL;
3069
3070 local_name = btf__name_by_offset(local_btf, local_type->name_off);
3071 if (str_is_empty(local_name))
3072 return -EINVAL;
3073
3074 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
3075 if (str_is_empty(spec_str))
3076 return -EINVAL;
3077
3078 err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
3079 if (err) {
3080 pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
3081 prog_name, relo_idx, local_id, local_name, spec_str,
3082 err);
3083 return -EINVAL;
3084 }
3085
3086 pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
3087 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
3088 libbpf_print(LIBBPF_DEBUG, "\n");
3089
3090 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
3091 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
3092 if (IS_ERR(cand_ids)) {
3093 pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
3094 prog_name, relo_idx, local_id, local_name,
3095 PTR_ERR(cand_ids));
3096 return PTR_ERR(cand_ids);
3097 }
3098 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
3099 if (err) {
3100 bpf_core_free_cands(cand_ids);
3101 return err;
3102 }
3103 }
3104
3105 for (i = 0, j = 0; i < cand_ids->len; i++) {
3106 cand_id = cand_ids->data[i];
3107 cand_type = btf__type_by_id(targ_btf, cand_id);
3108 cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
3109
3110 err = bpf_core_spec_match(&local_spec, targ_btf,
3111 cand_id, &cand_spec);
3112 pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
3113 prog_name, relo_idx, i, cand_name);
3114 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
3115 libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
3116 if (err < 0) {
3117 pr_warning("prog '%s': relo #%d: matching error: %d\n",
3118 prog_name, relo_idx, err);
3119 return err;
3120 }
3121 if (err == 0)
3122 continue;
3123
3124 if (j == 0) {
3125 targ_spec = cand_spec;
3126 } else if (cand_spec.offset != targ_spec.offset) {
3127 /* if there are many candidates, they should all
3128 * resolve to the same offset
3129 */
3130 pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
3131 prog_name, relo_idx, cand_spec.offset,
3132 targ_spec.offset);
3133 return -EINVAL;
3134 }
3135
3136 cand_ids->data[j++] = cand_spec.spec[0].type_id;
3137 }
3138
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003139 /*
3140 * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
3141 * requested, it's expected that we might not find any candidates.
3142	 * In this case, if the field wasn't found in any candidate, the list of
3143	 * candidates shouldn't change at all; we'll just handle the relocation
3144	 * appropriately, depending on the relo's kind.
3145 */
3146 if (j > 0)
3147 cand_ids->len = j;
3148
3149 if (j == 0 && !prog->obj->relaxed_core_relocs &&
3150 relo->kind != BPF_FIELD_EXISTS) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003151 pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
3152 prog_name, relo_idx, local_id, local_name, spec_str);
3153 return -ESRCH;
3154 }
3155
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003156 /* bpf_core_reloc_insn should know how to handle missing targ_spec */
3157 err = bpf_core_reloc_insn(prog, relo, &local_spec,
3158 j ? &targ_spec : NULL);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003159 if (err) {
3160 pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
3161 prog_name, relo_idx, relo->insn_off, err);
3162 return -EINVAL;
3163 }
3164
3165 return 0;
3166}
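
/* To make the "flavor" mechanics described above concrete, a hedged sketch
 * of how a BPF program might define two local flavors of one kernel struct
 * to cope with a hypothetical field rename; the struct/field names are
 * invented, and bpf_core_field_exists()/BPF_CORE_READ() are assumed to come
 * from bpf_core_read.h:
 *
 *	struct task_struct___old {
 *		int old_state;
 *	} __attribute__((preserve_access_index));
 *
 *	struct task_struct___new {
 *		int new_state;
 *	} __attribute__((preserve_access_index));
 *
 *	// both flavors match essential name "task_struct"; at load time,
 *	// BPF_FIELD_EXISTS relocs pick whichever field the kernel has:
 *	if (bpf_core_field_exists(((struct task_struct___new *)t)->new_state))
 *		state = BPF_CORE_READ((struct task_struct___new *)t, new_state);
 *	else
 *		state = BPF_CORE_READ((struct task_struct___old *)t, old_state);
 */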
3167
3168static int
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003169bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003170{
3171 const struct btf_ext_info_sec *sec;
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003172 const struct bpf_field_reloc *rec;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003173 const struct btf_ext_info *seg;
3174 struct hashmap_entry *entry;
3175 struct hashmap *cand_cache = NULL;
3176 struct bpf_program *prog;
3177 struct btf *targ_btf;
3178 const char *sec_name;
3179 int i, err = 0;
3180
3181 if (targ_btf_path)
3182 targ_btf = btf__parse_elf(targ_btf_path, NULL);
3183 else
3184 targ_btf = bpf_core_find_kernel_btf();
3185 if (IS_ERR(targ_btf)) {
3186 pr_warning("failed to get target BTF: %ld\n",
3187 PTR_ERR(targ_btf));
3188 return PTR_ERR(targ_btf);
3189 }
3190
3191 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
3192 if (IS_ERR(cand_cache)) {
3193 err = PTR_ERR(cand_cache);
3194 goto out;
3195 }
3196
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003197 seg = &obj->btf_ext->field_reloc_info;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003198 for_each_btf_ext_sec(seg, sec) {
3199 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3200 if (str_is_empty(sec_name)) {
3201 err = -EINVAL;
3202 goto out;
3203 }
3204 prog = bpf_object__find_program_by_title(obj, sec_name);
3205 if (!prog) {
3206 pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
3207 sec_name);
3208 err = -EINVAL;
3209 goto out;
3210 }
3211
3212 pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
3213 sec_name, sec->num_info);
3214
3215 for_each_btf_ext_rec(seg, sec, i, rec) {
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003216 err = bpf_core_reloc_field(prog, rec, i, obj->btf,
3217 targ_btf, cand_cache);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003218 if (err) {
3219 pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
3220 sec_name, i, err);
3221 goto out;
3222 }
3223 }
3224 }
3225
3226out:
3227 btf__free(targ_btf);
3228 if (!IS_ERR_OR_NULL(cand_cache)) {
3229 hashmap__for_each_entry(cand_cache, entry, i) {
3230 bpf_core_free_cands(entry->value);
3231 }
3232 hashmap__free(cand_cache);
3233 }
3234 return err;
3235}
3236
3237static int
3238bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
3239{
3240 int err = 0;
3241
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003242 if (obj->btf_ext->field_reloc_info.len)
3243 err = bpf_core_reloc_fields(obj, targ_btf_path);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003244
3245 return err;
3246}
3247
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003248static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003249bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
3250 struct reloc_desc *relo)
3251{
3252 struct bpf_insn *insn, *new_insn;
3253 struct bpf_program *text;
3254 size_t new_cnt;
Yonghong Song2993e052018-11-19 15:29:16 -08003255 int err;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003256
3257 if (relo->type != RELO_CALL)
3258 return -LIBBPF_ERRNO__RELOC;
3259
3260 if (prog->idx == obj->efile.text_shndx) {
3261 pr_warning("relo in .text insn %d into off %d\n",
3262 relo->insn_idx, relo->text_off);
3263 return -LIBBPF_ERRNO__RELOC;
3264 }
3265
3266 if (prog->main_prog_cnt == 0) {
3267 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
3268 if (!text) {
3269		pr_warning("no .text section found yet relo into text exists\n");
3270 return -LIBBPF_ERRNO__RELOC;
3271 }
3272 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07003273 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003274 if (!new_insn) {
3275 pr_warning("oom in prog realloc\n");
3276 return -ENOMEM;
3277 }
Yonghong Song2993e052018-11-19 15:29:16 -08003278
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003279 if (obj->btf_ext) {
3280 err = bpf_program_reloc_btf_ext(prog, obj,
3281 text->section_name,
3282 prog->insns_cnt);
3283 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08003284 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08003285 }
3286
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003287 memcpy(new_insn + prog->insns_cnt, text->insns,
3288 text->insns_cnt * sizeof(*insn));
3289 prog->insns = new_insn;
3290 prog->main_prog_cnt = prog->insns_cnt;
3291 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00003292 pr_debug("added %zd insn from %s to prog %s\n",
3293 text->insns_cnt, text->section_name,
3294 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003295 }
3296 insn = &prog->insns[relo->insn_idx];
3297 insn->imm += prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003298 return 0;
3299}
3300
3301static int
Wang Nan9d759a92015-11-27 08:47:35 +00003302bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00003303{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003304 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003305
Yonghong Song2993e052018-11-19 15:29:16 -08003306 if (!prog)
3307 return 0;
3308
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003309 if (obj->btf_ext) {
3310 err = bpf_program_reloc_btf_ext(prog, obj,
3311 prog->section_name, 0);
3312 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08003313 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08003314 }
3315
3316 if (!prog->reloc_desc)
Wang Nan8a47a6c2015-07-01 02:14:05 +00003317 return 0;
3318
3319 for (i = 0; i < prog->nr_reloc; i++) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02003320 if (prog->reloc_desc[i].type == RELO_LD64 ||
3321 prog->reloc_desc[i].type == RELO_DATA) {
3322 bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003323 struct bpf_insn *insns = prog->insns;
3324 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003325
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003326 insn_idx = prog->reloc_desc[i].insn_idx;
3327 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003328
Daniel Borkmannd8599002019-04-09 23:20:13 +02003329 if (insn_idx + 1 >= (int)prog->insns_cnt) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003330 pr_warning("relocation out of range: '%s'\n",
3331 prog->section_name);
3332 return -LIBBPF_ERRNO__RELOC;
3333 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02003334
3335 if (!relo_data) {
3336 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
3337 } else {
3338 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
3339 insns[insn_idx + 1].imm = insns[insn_idx].imm;
3340 }
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003341 insns[insn_idx].imm = obj->maps[map_idx].fd;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02003342 } else if (prog->reloc_desc[i].type == RELO_CALL) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003343 err = bpf_program__reloc_text(prog, obj,
3344 &prog->reloc_desc[i]);
3345 if (err)
3346 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003347 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00003348 }
3349
3350 zfree(&prog->reloc_desc);
3351 prog->nr_reloc = 0;
3352 return 0;
3353}
3354
Wang Nan8a47a6c2015-07-01 02:14:05 +00003355static int
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003356bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
Wang Nan8a47a6c2015-07-01 02:14:05 +00003357{
3358 struct bpf_program *prog;
3359 size_t i;
3360 int err;
3361
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003362 if (obj->btf_ext) {
3363 err = bpf_object__relocate_core(obj, targ_btf_path);
3364 if (err) {
3365 pr_warning("failed to perform CO-RE relocations: %d\n",
3366 err);
3367 return err;
3368 }
3369 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00003370 for (i = 0; i < obj->nr_programs; i++) {
3371 prog = &obj->programs[i];
3372
Wang Nan9d759a92015-11-27 08:47:35 +00003373 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00003374 if (err) {
3375 pr_warning("failed to relocate '%s'\n",
3376 prog->section_name);
3377 return err;
3378 }
3379 }
3380 return 0;
3381}
3382
Wang Nan34090912015-07-01 02:14:02 +00003383static int bpf_object__collect_reloc(struct bpf_object *obj)
3384{
3385 int i, err;
3386
3387 if (!obj_elf_valid(obj)) {
3388 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00003389 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00003390 }
3391
3392 for (i = 0; i < obj->efile.nr_reloc; i++) {
3393 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
3394 Elf_Data *data = obj->efile.reloc[i].data;
3395 int idx = shdr->sh_info;
3396 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00003397
3398 if (shdr->sh_type != SHT_REL) {
3399 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003400 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00003401 }
3402
3403 prog = bpf_object__find_prog_by_idx(obj, idx);
3404 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01003405 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003406 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00003407 }
3408
Andrii Nakryiko399dc652019-05-29 10:36:11 -07003409 err = bpf_program__collect_reloc(prog, shdr, data, obj);
Wang Nan34090912015-07-01 02:14:02 +00003410 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00003411 return err;
Wang Nan34090912015-07-01 02:14:02 +00003412 }
3413 return 0;
3414}
3415
Wang Nan55cffde2015-07-01 02:14:07 +00003416static int
Yonghong Song2993e052018-11-19 15:29:16 -08003417load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003418 char *license, __u32 kern_version, int *pfd)
Wang Nan55cffde2015-07-01 02:14:07 +00003419{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003420 struct bpf_load_program_attr load_attr;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003421 char *cp, errmsg[STRERR_BUFSIZE];
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003422 int log_buf_size = BPF_LOG_BUF_SIZE;
Wang Nan55cffde2015-07-01 02:14:07 +00003423 char *log_buf;
Andrii Nakryiko5d01ab72019-07-26 14:24:38 -07003424 int btf_fd, ret;
Wang Nan55cffde2015-07-01 02:14:07 +00003425
Andrii Nakryikofba01a02019-05-29 10:36:08 -07003426 if (!insns || !insns_cnt)
3427 return -EINVAL;
3428
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003429 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
Yonghong Song2993e052018-11-19 15:29:16 -08003430 load_attr.prog_type = prog->type;
3431 load_attr.expected_attach_type = prog->expected_attach_type;
Stanislav Fomichev5b32a232018-11-20 17:11:21 -08003432 if (prog->caps->name)
3433 load_attr.name = prog->name;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003434 load_attr.insns = insns;
3435 load_attr.insns_cnt = insns_cnt;
3436 load_attr.license = license;
3437 load_attr.kern_version = kern_version;
Yonghong Song2993e052018-11-19 15:29:16 -08003438 load_attr.prog_ifindex = prog->prog_ifindex;
Andrii Nakryiko3415ec62019-08-01 00:24:05 -07003439 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
3440 if (prog->obj->btf_ext)
3441 btf_fd = bpf_object__btf_fd(prog->obj);
3442 else
3443 btf_fd = -1;
Andrii Nakryiko5d01ab72019-07-26 14:24:38 -07003444 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
Yonghong Song2993e052018-11-19 15:29:16 -08003445 load_attr.func_info = prog->func_info;
3446 load_attr.func_info_rec_size = prog->func_info_rec_size;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003447 load_attr.func_info_cnt = prog->func_info_cnt;
Martin KaFai Lau3d650142018-12-07 16:42:31 -08003448 load_attr.line_info = prog->line_info;
3449 load_attr.line_info_rec_size = prog->line_info_rec_size;
3450 load_attr.line_info_cnt = prog->line_info_cnt;
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003451 load_attr.log_level = prog->log_level;
Jiong Wang04656192019-05-24 23:25:19 +01003452 load_attr.prog_flags = prog->prog_flags;
Wang Nan55cffde2015-07-01 02:14:07 +00003453
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003454retry_load:
3455 log_buf = malloc(log_buf_size);
Wang Nan55cffde2015-07-01 02:14:07 +00003456 if (!log_buf)
3457		pr_warning("failed to alloc log buffer for bpf loader, continuing without log\n");
3458
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003459 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
Wang Nan55cffde2015-07-01 02:14:07 +00003460
3461 if (ret >= 0) {
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003462 if (load_attr.log_level)
3463 pr_debug("verifier log:\n%s", log_buf);
Wang Nan55cffde2015-07-01 02:14:07 +00003464 *pfd = ret;
3465 ret = 0;
3466 goto out;
3467 }
3468
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003469 if (errno == ENOSPC) {
3470 log_buf_size <<= 1;
3471 free(log_buf);
3472 goto retry_load;
3473 }
Wang Nan6371ca3b2015-11-06 13:49:37 +00003474 ret = -LIBBPF_ERRNO__LOAD;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07003475 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003476 pr_warning("load bpf program failed: %s\n", cp);
Wang Nan55cffde2015-07-01 02:14:07 +00003477
Wang Nan6371ca3b2015-11-06 13:49:37 +00003478 if (log_buf && log_buf[0] != '\0') {
3479 ret = -LIBBPF_ERRNO__VERIFY;
Wang Nan55cffde2015-07-01 02:14:07 +00003480 pr_warning("-- BEGIN DUMP LOG ---\n");
3481 pr_warning("\n%s\n", log_buf);
3482 pr_warning("-- END LOG --\n");
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003483 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
3484 pr_warning("Program too large (%zu insns), at most %d insns\n",
3485 load_attr.insns_cnt, BPF_MAXINSNS);
Wang Nan705fa212016-07-13 10:44:02 +00003486 ret = -LIBBPF_ERRNO__PROG2BIG;
Wang Nan6371ca3b2015-11-06 13:49:37 +00003487 } else {
Wang Nan705fa212016-07-13 10:44:02 +00003488 /* Wrong program type? */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003489 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
Wang Nan705fa212016-07-13 10:44:02 +00003490 int fd;
3491
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003492 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
3493 load_attr.expected_attach_type = 0;
3494 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
Wang Nan705fa212016-07-13 10:44:02 +00003495 if (fd >= 0) {
3496 close(fd);
3497 ret = -LIBBPF_ERRNO__PROGTYPE;
3498 goto out;
3499 }
Wang Nan6371ca3b2015-11-06 13:49:37 +00003500 }
Wang Nan705fa212016-07-13 10:44:02 +00003501
3502 if (log_buf)
3503 ret = -LIBBPF_ERRNO__KVER;
Wang Nan55cffde2015-07-01 02:14:07 +00003504 }
3505
3506out:
3507 free(log_buf);
3508 return ret;
3509}
3510
Joe Stringer29cd77f2018-10-02 13:35:39 -07003511int
Wang Nan55cffde2015-07-01 02:14:07 +00003512bpf_program__load(struct bpf_program *prog,
Andrey Ignatove5b08632018-10-03 15:26:43 -07003513 char *license, __u32 kern_version)
Wang Nan55cffde2015-07-01 02:14:07 +00003514{
Wang Nanb5805632015-11-16 12:10:09 +00003515 int err = 0, fd, i;
Wang Nan55cffde2015-07-01 02:14:07 +00003516
Wang Nanb5805632015-11-16 12:10:09 +00003517 if (prog->instances.nr < 0 || !prog->instances.fds) {
3518 if (prog->preprocessor) {
3519 pr_warning("Internal error: can't load program '%s'\n",
3520 prog->section_name);
3521 return -LIBBPF_ERRNO__INTERNAL;
3522 }
Wang Nan55cffde2015-07-01 02:14:07 +00003523
Wang Nanb5805632015-11-16 12:10:09 +00003524 prog->instances.fds = malloc(sizeof(int));
3525 if (!prog->instances.fds) {
3526 pr_warning("Not enough memory for BPF fds\n");
3527 return -ENOMEM;
3528 }
3529 prog->instances.nr = 1;
3530 prog->instances.fds[0] = -1;
3531 }
3532
3533 if (!prog->preprocessor) {
3534 if (prog->instances.nr != 1) {
3535 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
3536 prog->section_name, prog->instances.nr);
3537 }
Yonghong Song2993e052018-11-19 15:29:16 -08003538 err = load_program(prog, prog->insns, prog->insns_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003539 license, kern_version, &fd);
Wang Nanb5805632015-11-16 12:10:09 +00003540 if (!err)
3541 prog->instances.fds[0] = fd;
3542 goto out;
3543 }
3544
3545 for (i = 0; i < prog->instances.nr; i++) {
3546 struct bpf_prog_prep_result result;
3547 bpf_program_prep_t preprocessor = prog->preprocessor;
3548
Andrii Nakryiko1ad9cbb2019-02-13 10:25:53 -08003549 memset(&result, 0, sizeof(result));
Wang Nanb5805632015-11-16 12:10:09 +00003550 err = preprocessor(prog, i, prog->insns,
3551 prog->insns_cnt, &result);
3552 if (err) {
3553 pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
3554 i, prog->section_name);
3555 goto out;
3556 }
3557
3558 if (!result.new_insn_ptr || !result.new_insn_cnt) {
3559 pr_debug("Skip loading the %dth instance of program '%s'\n",
3560 i, prog->section_name);
3561 prog->instances.fds[i] = -1;
3562 if (result.pfd)
3563 *result.pfd = -1;
3564 continue;
3565 }
3566
Yonghong Song2993e052018-11-19 15:29:16 -08003567 err = load_program(prog, result.new_insn_ptr,
Wang Nanb5805632015-11-16 12:10:09 +00003568 result.new_insn_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003569 license, kern_version, &fd);
Wang Nanb5805632015-11-16 12:10:09 +00003570
3571 if (err) {
3572 pr_warning("Loading the %dth instance of program '%s' failed\n",
3573 i, prog->section_name);
3574 goto out;
3575 }
3576
3577 if (result.pfd)
3578 *result.pfd = fd;
3579 prog->instances.fds[i] = fd;
3580 }
3581out:
Wang Nan55cffde2015-07-01 02:14:07 +00003582 if (err)
3583 pr_warning("failed to load program '%s'\n",
3584 prog->section_name);
3585 zfree(&prog->insns);
3586 prog->insns_cnt = 0;
3587 return err;
3588}
3589
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07003590static bool bpf_program__is_function_storage(const struct bpf_program *prog,
3591 const struct bpf_object *obj)
Jakub Kicinski9a94f272018-06-28 14:41:38 -07003592{
3593 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
3594}
3595
Wang Nan55cffde2015-07-01 02:14:07 +00003596static int
Quentin Monnet60276f92019-05-24 11:36:47 +01003597bpf_object__load_progs(struct bpf_object *obj, int log_level)
Wang Nan55cffde2015-07-01 02:14:07 +00003598{
3599 size_t i;
3600 int err;
3601
3602 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07003603 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003604 continue;
Quentin Monnet501b1252019-05-29 15:26:41 +01003605 obj->programs[i].log_level |= log_level;
Wang Nan55cffde2015-07-01 02:14:07 +00003606 err = bpf_program__load(&obj->programs[i],
3607 obj->license,
3608 obj->kern_version);
3609 if (err)
3610 return err;
3611 }
3612 return 0;
3613}
3614
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003615static struct bpf_object *
Andrii Nakryiko5e61f272019-10-04 15:40:34 -07003616__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003617 struct bpf_object_open_opts *opts)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003618{
3619 struct bpf_object *obj;
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003620 const char *obj_name;
3621 char tmp_name[64];
3622 bool relaxed_maps;
Wang Nan6371ca3b2015-11-06 13:49:37 +00003623 int err;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003624
3625 if (elf_version(EV_CURRENT) == EV_NONE) {
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003626 pr_warning("failed to init libelf for %s\n",
3627 path ? : "(mem buf)");
Wang Nan6371ca3b2015-11-06 13:49:37 +00003628 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003629 }
3630
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003631 if (!OPTS_VALID(opts, bpf_object_open_opts))
3632 return ERR_PTR(-EINVAL);
3633
3634 obj_name = OPTS_GET(opts, object_name, path);
3635 if (obj_buf) {
3636 if (!obj_name) {
3637 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
3638 (unsigned long)obj_buf,
3639 (unsigned long)obj_buf_sz);
3640 obj_name = tmp_name;
3641 }
3642 path = obj_name;
3643 pr_debug("loading object '%s' from buffer\n", obj_name);
3644 }
3645
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003646 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003647 if (IS_ERR(obj))
3648 return obj;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003649
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003650 obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003651 relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
3652
Wang Nan6371ca3b2015-11-06 13:49:37 +00003653 CHECK_ERR(bpf_object__elf_init(obj), err, out);
3654 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003655 CHECK_ERR(bpf_object__probe_caps(obj), err, out);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003656 CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003657 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003658
3659 bpf_object__elf_finish(obj);
3660 return obj;
3661out:
3662 bpf_object__close(obj);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003663 return ERR_PTR(err);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003664}
3665
Andrii Nakryiko5e61f272019-10-04 15:40:34 -07003666static struct bpf_object *
3667__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003668{
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003669 LIBBPF_OPTS(bpf_object_open_opts, opts,
3670 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
3671 );
3672
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003673 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003674 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003675 return NULL;
3676
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003677 pr_debug("loading %s\n", attr->file);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003678 return __bpf_object__open(attr->file, NULL, 0, &opts);
John Fastabendc034a172018-10-15 11:19:55 -07003679}
3680
3681struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
3682{
3683 return __bpf_object__open_xattr(attr, 0);
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003684}
3685
3686struct bpf_object *bpf_object__open(const char *path)
3687{
3688 struct bpf_object_open_attr attr = {
3689 .file = path,
3690 .prog_type = BPF_PROG_TYPE_UNSPEC,
3691 };
3692
3693 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00003694}
3695
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003696struct bpf_object *
3697bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
3698{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003699 if (!path)
3700 return ERR_PTR(-EINVAL);
3701
3702 pr_debug("loading %s\n", path);
3703
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003704 return __bpf_object__open(path, NULL, 0, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003705}
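
/* A short usage sketch for the opts-based open API; the file name and
 * option values are illustrative only:
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *		.relaxed_core_relocs = true,
 *	);
 *	struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
 *
 *	if (libbpf_get_error(obj))
 *		// open failed, obj encodes the error
 */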
3706
3707struct bpf_object *
3708bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
3709 struct bpf_object_open_opts *opts)
Wang Nan6c956392015-07-01 02:13:54 +00003710{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003711 if (!obj_buf || obj_buf_sz == 0)
3712 return ERR_PTR(-EINVAL);
Wang Nan6c956392015-07-01 02:13:54 +00003713
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003714 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003715}
3716
3717struct bpf_object *
3718bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
3719 const char *name)
3720{
3721 LIBBPF_OPTS(bpf_object_open_opts, opts,
3722 .object_name = name,
3723 /* wrong default, but backwards-compatible */
3724 .relaxed_maps = true,
3725 );
3726
3727 /* returning NULL is wrong, but backwards-compatible */
3728 if (!obj_buf || obj_buf_sz == 0)
3729 return NULL;
3730
3731 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003732}
3733
Wang Nan52d33522015-07-01 02:14:04 +00003734int bpf_object__unload(struct bpf_object *obj)
3735{
3736 size_t i;
3737
3738 if (!obj)
3739 return -EINVAL;
3740
Wang Nan9d759a92015-11-27 08:47:35 +00003741 for (i = 0; i < obj->nr_maps; i++)
3742 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00003743
Wang Nan55cffde2015-07-01 02:14:07 +00003744 for (i = 0; i < obj->nr_programs; i++)
3745 bpf_program__unload(&obj->programs[i]);
3746
Wang Nan52d33522015-07-01 02:14:04 +00003747 return 0;
3748}
3749
Quentin Monnet60276f92019-05-24 11:36:47 +01003750int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
Wang Nan52d33522015-07-01 02:14:04 +00003751{
Quentin Monnet60276f92019-05-24 11:36:47 +01003752 struct bpf_object *obj;
Wang Nan6371ca3b2015-11-06 13:49:37 +00003753 int err;
3754
Quentin Monnet60276f92019-05-24 11:36:47 +01003755 if (!attr)
3756 return -EINVAL;
3757 obj = attr->obj;
Wang Nan52d33522015-07-01 02:14:04 +00003758 if (!obj)
3759 return -EINVAL;
3760
3761 if (obj->loaded) {
3762 pr_warning("object should not be loaded twice\n");
3763 return -EINVAL;
3764 }
3765
3766 obj->loaded = true;
Wang Nan6371ca3b2015-11-06 13:49:37 +00003767
3768 CHECK_ERR(bpf_object__create_maps(obj), err, out);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003769 CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
Quentin Monnet60276f92019-05-24 11:36:47 +01003770 CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
Wang Nan52d33522015-07-01 02:14:04 +00003771
3772 return 0;
3773out:
3774 bpf_object__unload(obj);
3775 pr_warning("failed to load object '%s'\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003776 return err;
Wang Nan52d33522015-07-01 02:14:04 +00003777}
3778
Quentin Monnet60276f92019-05-24 11:36:47 +01003779int bpf_object__load(struct bpf_object *obj)
3780{
3781 struct bpf_object_load_attr attr = {
3782 .obj = obj,
3783 };
3784
3785 return bpf_object__load_xattr(&attr);
3786}
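
/* A hedged sketch of loading with explicit attributes; the BTF path shown
 * is an assumed alternative CO-RE relocation target (by default libbpf
 * probes the kernel's own BTF, see bpf_core_find_kernel_btf()):
 *
 *	struct bpf_object_load_attr attr = {
 *		.obj = obj,
 *		.log_level = 1,
 *		.target_btf_path = "/path/to/custom/vmlinux.btf",
 *	};
 *
 *	if (bpf_object__load_xattr(&attr))
 *		// load or relocation failed; the object was unloaded
 */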
3787
Joe Stringerf3675402017-01-26 13:19:56 -08003788static int check_path(const char *path)
3789{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003790 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08003791 struct statfs st_fs;
3792 char *dname, *dir;
3793 int err = 0;
3794
3795 if (path == NULL)
3796 return -EINVAL;
3797
3798 dname = strdup(path);
3799 if (dname == NULL)
3800 return -ENOMEM;
3801
3802 dir = dirname(dname);
3803 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07003804 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003805 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08003806 err = -errno;
3807 }
3808 free(dname);
3809
3810 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
3811 pr_warning("specified path %s is not on BPF FS\n", path);
3812 err = -EINVAL;
3813 }
3814
3815 return err;
3816}
3817
3818int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
3819 int instance)
3820{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003821 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08003822 int err;
3823
3824 err = check_path(path);
3825 if (err)
3826 return err;
3827
3828 if (prog == NULL) {
3829 pr_warning("invalid program pointer\n");
3830 return -EINVAL;
3831 }
3832
3833 if (instance < 0 || instance >= prog->instances.nr) {
3834 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3835 instance, prog->section_name, prog->instances.nr);
3836 return -EINVAL;
3837 }
3838
3839 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07003840 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003841 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08003842 return -errno;
3843 }
3844 pr_debug("pinned program '%s'\n", path);
3845
3846 return 0;
3847}
3848
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003849int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
3850 int instance)
3851{
3852 int err;
3853
3854 err = check_path(path);
3855 if (err)
3856 return err;
3857
3858 if (prog == NULL) {
3859 pr_warning("invalid program pointer\n");
3860 return -EINVAL;
3861 }
3862
3863 if (instance < 0 || instance >= prog->instances.nr) {
3864 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3865 instance, prog->section_name, prog->instances.nr);
3866 return -EINVAL;
3867 }
3868
3869 err = unlink(path);
3870 if (err != 0)
3871 return -errno;
3872 pr_debug("unpinned program '%s'\n", path);
3873
3874 return 0;
3875}
3876
Joe Stringerf3675402017-01-26 13:19:56 -08003877static int make_dir(const char *path)
3878{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003879 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08003880 int err = 0;
3881
3882 if (mkdir(path, 0700) && errno != EEXIST)
3883 err = -errno;
3884
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003885 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07003886 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003887 pr_warning("failed to mkdir %s: %s\n", path, cp);
3888 }
Joe Stringerf3675402017-01-26 13:19:56 -08003889 return err;
3890}
3891
3892int bpf_program__pin(struct bpf_program *prog, const char *path)
3893{
3894 int i, err;
3895
3896 err = check_path(path);
3897 if (err)
3898 return err;
3899
3900 if (prog == NULL) {
3901 pr_warning("invalid program pointer\n");
3902 return -EINVAL;
3903 }
3904
3905 if (prog->instances.nr <= 0) {
3906 pr_warning("no instances of prog %s to pin\n",
3907 prog->section_name);
3908 return -EINVAL;
3909 }
3910
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08003911 if (prog->instances.nr == 1) {
3912 /* don't create subdirs when pinning single instance */
3913 return bpf_program__pin_instance(prog, path, 0);
3914 }
3915
Joe Stringerf3675402017-01-26 13:19:56 -08003916 err = make_dir(path);
3917 if (err)
3918 return err;
3919
3920 for (i = 0; i < prog->instances.nr; i++) {
3921 char buf[PATH_MAX];
3922 int len;
3923
3924 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003925 if (len < 0) {
3926 err = -EINVAL;
3927 goto err_unpin;
3928 } else if (len >= PATH_MAX) {
3929 err = -ENAMETOOLONG;
3930 goto err_unpin;
3931 }
3932
3933 err = bpf_program__pin_instance(prog, buf, i);
3934 if (err)
3935 goto err_unpin;
3936 }
3937
3938 return 0;
3939
3940err_unpin:
3941 for (i = i - 1; i >= 0; i--) {
3942 char buf[PATH_MAX];
3943 int len;
3944
3945 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3946 if (len < 0)
3947 continue;
3948 else if (len >= PATH_MAX)
3949 continue;
3950
3951 bpf_program__unpin_instance(prog, buf, i);
3952 }
3953
3954 rmdir(path);
3955
3956 return err;
3957}
3958
3959int bpf_program__unpin(struct bpf_program *prog, const char *path)
3960{
3961 int i, err;
3962
3963 err = check_path(path);
3964 if (err)
3965 return err;
3966
3967 if (prog == NULL) {
3968 pr_warning("invalid program pointer\n");
3969 return -EINVAL;
3970 }
3971
3972 if (prog->instances.nr <= 0) {
3973 pr_warning("no instances of prog %s to pin\n",
3974 prog->section_name);
3975 return -EINVAL;
3976 }
3977
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08003978 if (prog->instances.nr == 1) {
3979 /* don't create subdirs when pinning single instance */
3980 return bpf_program__unpin_instance(prog, path, 0);
3981 }
3982
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003983 for (i = 0; i < prog->instances.nr; i++) {
3984 char buf[PATH_MAX];
3985 int len;
3986
3987 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08003988 if (len < 0)
3989 return -EINVAL;
3990 else if (len >= PATH_MAX)
3991 return -ENAMETOOLONG;
3992
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003993 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08003994 if (err)
3995 return err;
3996 }
3997
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003998 err = rmdir(path);
3999 if (err)
4000 return -errno;
4001
Joe Stringerf3675402017-01-26 13:19:56 -08004002 return 0;
4003}
4004
Joe Stringerb6989f32017-01-26 13:19:57 -08004005int bpf_map__pin(struct bpf_map *map, const char *path)
4006{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02004007 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08004008 int err;
4009
4010 err = check_path(path);
4011 if (err)
4012 return err;
4013
4014 if (map == NULL) {
4015 pr_warning("invalid map pointer\n");
4016 return -EINVAL;
4017 }
4018
4019 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07004020 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02004021 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08004022 return -errno;
4023 }
4024
4025 pr_debug("pinned map '%s'\n", path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004026
Joe Stringerb6989f32017-01-26 13:19:57 -08004027 return 0;
4028}
4029
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004030int bpf_map__unpin(struct bpf_map *map, const char *path)
Joe Stringerd5148d82017-01-26 13:19:58 -08004031{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004032 int err;
4033
4034 err = check_path(path);
4035 if (err)
4036 return err;
4037
4038 if (map == NULL) {
4039 pr_warning("invalid map pointer\n");
4040 return -EINVAL;
4041 }
4042
4043 err = unlink(path);
4044 if (err != 0)
4045 return -errno;
4046 pr_debug("unpinned map '%s'\n", path);
4047
4048 return 0;
4049}
4050
4051int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
4052{
Joe Stringerd5148d82017-01-26 13:19:58 -08004053 struct bpf_map *map;
4054 int err;
4055
4056 if (!obj)
4057 return -ENOENT;
4058
4059 if (!obj->loaded) {
4060 pr_warning("object not yet loaded; load it first\n");
4061 return -ENOENT;
4062 }
4063
4064 err = make_dir(path);
4065 if (err)
4066 return err;
4067
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08004068 bpf_object__for_each_map(map, obj) {
Joe Stringerd5148d82017-01-26 13:19:58 -08004069 char buf[PATH_MAX];
4070 int len;
4071
4072 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4073 bpf_map__name(map));
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004074 if (len < 0) {
4075 err = -EINVAL;
4076 goto err_unpin_maps;
4077 } else if (len >= PATH_MAX) {
4078 err = -ENAMETOOLONG;
4079 goto err_unpin_maps;
4080 }
4081
4082 err = bpf_map__pin(map, buf);
4083 if (err)
4084 goto err_unpin_maps;
4085 }
4086
4087 return 0;
4088
4089err_unpin_maps:
4090 while ((map = bpf_map__prev(map, obj))) {
4091 char buf[PATH_MAX];
4092 int len;
4093
4094 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4095 bpf_map__name(map));
4096 if (len < 0)
4097 continue;
4098 else if (len >= PATH_MAX)
4099 continue;
4100
4101 bpf_map__unpin(map, buf);
4102 }
4103
4104 return err;
4105}
4106
4107int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
4108{
4109 struct bpf_map *map;
4110 int err;
4111
4112 if (!obj)
4113 return -ENOENT;
4114
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08004115 bpf_object__for_each_map(map, obj) {
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004116 char buf[PATH_MAX];
4117 int len;
4118
4119 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4120 bpf_map__name(map));
Joe Stringerd5148d82017-01-26 13:19:58 -08004121 if (len < 0)
4122 return -EINVAL;
4123 else if (len >= PATH_MAX)
4124 return -ENAMETOOLONG;
4125
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004126 err = bpf_map__unpin(map, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08004127 if (err)
4128 return err;
4129 }
4130
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004131 return 0;
4132}
4133
4134int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
4135{
4136 struct bpf_program *prog;
4137 int err;
4138
4139 if (!obj)
4140 return -ENOENT;
4141
4142 if (!obj->loaded) {
4143 pr_warning("object not yet loaded; load it first\n");
4144 return -ENOENT;
4145 }
4146
4147 err = make_dir(path);
4148 if (err)
4149 return err;
4150
4151 bpf_object__for_each_program(prog, obj) {
4152 char buf[PATH_MAX];
4153 int len;
4154
4155 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08004156 prog->pin_name);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004157 if (len < 0) {
4158 err = -EINVAL;
4159 goto err_unpin_programs;
4160 } else if (len >= PATH_MAX) {
4161 err = -ENAMETOOLONG;
4162 goto err_unpin_programs;
4163 }
4164
4165 err = bpf_program__pin(prog, buf);
4166 if (err)
4167 goto err_unpin_programs;
4168 }
4169
4170 return 0;
4171
4172err_unpin_programs:
4173 while ((prog = bpf_program__prev(prog, obj))) {
4174 char buf[PATH_MAX];
4175 int len;
4176
4177 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08004178 prog->pin_name);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004179 if (len < 0)
4180 continue;
4181 else if (len >= PATH_MAX)
4182 continue;
4183
4184 bpf_program__unpin(prog, buf);
4185 }
4186
4187 return err;
4188}
4189
4190int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
4191{
4192 struct bpf_program *prog;
4193 int err;
4194
4195 if (!obj)
4196 return -ENOENT;
4197
Joe Stringerd5148d82017-01-26 13:19:58 -08004198 bpf_object__for_each_program(prog, obj) {
4199 char buf[PATH_MAX];
4200 int len;
4201
4202 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08004203 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08004204 if (len < 0)
4205 return -EINVAL;
4206 else if (len >= PATH_MAX)
4207 return -ENAMETOOLONG;
4208
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004209 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08004210 if (err)
4211 return err;
4212 }
4213
4214 return 0;
4215}
4216
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004217int bpf_object__pin(struct bpf_object *obj, const char *path)
4218{
4219 int err;
4220
4221 err = bpf_object__pin_maps(obj, path);
4222 if (err)
4223 return err;
4224
4225 err = bpf_object__pin_programs(obj, path);
4226 if (err) {
4227 bpf_object__unpin_maps(obj, path);
4228 return err;
4229 }
4230
4231 return 0;
4232}
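
/* Usage sketch: pin all maps and programs of a loaded object under one BPF
 * FS directory ("/sys/fs/bpf/myapp" is an assumed path):
 *
 *	if (bpf_object__pin(obj, "/sys/fs/bpf/myapp"))
 *		// failed; maps pinned before the error were unpinned again
 */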
4233
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004234void bpf_object__close(struct bpf_object *obj)
4235{
Wang Nana5b8bd42015-07-01 02:14:00 +00004236 size_t i;
4237
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004238 if (!obj)
4239 return;
4240
Wang Nan10931d22016-11-26 07:03:26 +00004241 if (obj->clear_priv)
4242 obj->clear_priv(obj, obj->priv);
4243
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004244 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00004245 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004246 btf__free(obj->btf);
Yonghong Song2993e052018-11-19 15:29:16 -08004247 btf_ext__free(obj->btf_ext);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004248
Wang Nan9d759a92015-11-27 08:47:35 +00004249 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00004250 zfree(&obj->maps[i].name);
Wang Nan9d759a92015-11-27 08:47:35 +00004251 if (obj->maps[i].clear_priv)
4252 obj->maps[i].clear_priv(&obj->maps[i],
4253 obj->maps[i].priv);
4254 obj->maps[i].priv = NULL;
4255 obj->maps[i].clear_priv = NULL;
4256 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02004257
4258 zfree(&obj->sections.rodata);
4259 zfree(&obj->sections.data);
Wang Nan9d759a92015-11-27 08:47:35 +00004260 zfree(&obj->maps);
4261 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00004262
4263 if (obj->programs && obj->nr_programs) {
4264 for (i = 0; i < obj->nr_programs; i++)
4265 bpf_program__exit(&obj->programs[i]);
4266 }
4267 zfree(&obj->programs);
4268
Wang Nan9a208ef2015-07-01 02:14:10 +00004269 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004270 free(obj);
4271}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004272
Wang Nan9a208ef2015-07-01 02:14:10 +00004273struct bpf_object *
4274bpf_object__next(struct bpf_object *prev)
4275{
4276 struct bpf_object *next;
4277
4278 if (!prev)
4279 next = list_first_entry(&bpf_objects_list,
4280 struct bpf_object,
4281 list);
4282 else
4283 next = list_next_entry(prev, list);
4284
4285	/* An empty list is detected here, so no check is needed on entry. */
4286 if (&next->list == &bpf_objects_list)
4287 return NULL;
4288
4289 return next;
4290}
4291
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004292const char *bpf_object__name(const struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00004293{
Andrii Nakryikoc9e4c302019-10-04 15:40:36 -07004294 return obj ? obj->name : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00004295}
4296
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004297unsigned int bpf_object__kversion(const struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00004298{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03004299 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00004300}
4301
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004302struct btf *bpf_object__btf(const struct bpf_object *obj)
Andrey Ignatov789f6ba2019-02-14 15:01:43 -08004303{
4304 return obj ? obj->btf : NULL;
4305}
4306
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004307int bpf_object__btf_fd(const struct bpf_object *obj)
4308{
4309 return obj->btf ? btf__fd(obj->btf) : -1;
4310}
4311
Wang Nan10931d22016-11-26 07:03:26 +00004312int bpf_object__set_priv(struct bpf_object *obj, void *priv,
4313 bpf_object_clear_priv_t clear_priv)
4314{
4315 if (obj->priv && obj->clear_priv)
4316 obj->clear_priv(obj, obj->priv);
4317
4318 obj->priv = priv;
4319 obj->clear_priv = clear_priv;
4320 return 0;
4321}
4322
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004323void *bpf_object__priv(const struct bpf_object *obj)
Wang Nan10931d22016-11-26 07:03:26 +00004324{
4325 return obj ? obj->priv : ERR_PTR(-EINVAL);
4326}
4327
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004328static struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004329__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
4330 bool forward)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004331{
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004332 size_t nr_programs = obj->nr_programs;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004333 ssize_t idx;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004334
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004335 if (!nr_programs)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004336 return NULL;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004337
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004338 if (!p)
4339 /* Iter from the beginning */
4340 return forward ? &obj->programs[0] :
4341 &obj->programs[nr_programs - 1];
4342
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004343 if (p->obj != obj) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004344 pr_warning("error: program handler doesn't match object\n");
4345 return NULL;
4346 }
4347
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004348 idx = (p - obj->programs) + (forward ? 1 : -1);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004349 if (idx >= obj->nr_programs || idx < 0)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004350 return NULL;
4351 return &obj->programs[idx];
4352}
4353
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004354struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004355bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004356{
4357 struct bpf_program *prog = prev;
4358
4359 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004360 prog = __bpf_program__iter(prog, obj, true);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004361 } while (prog && bpf_program__is_function_storage(prog, obj));
4362
4363 return prog;
4364}
4365
4366struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004367bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004368{
4369 struct bpf_program *prog = next;
4370
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004371 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004372 prog = __bpf_program__iter(prog, obj, false);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004373 } while (prog && bpf_program__is_function_storage(prog, obj));
4374
4375 return prog;
4376}
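
/* Iteration sketch: the bpf_object__for_each_program() macro from libbpf.h
 * wraps bpf_program__next() as used above:
 *
 *	struct bpf_program *p;
 *
 *	bpf_object__for_each_program(p, obj)
 *		printf("%s\n", bpf_program__title(p, false));
 */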
4377
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03004378int bpf_program__set_priv(struct bpf_program *prog, void *priv,
4379 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004380{
4381 if (prog->priv && prog->clear_priv)
4382 prog->clear_priv(prog, prog->priv);
4383
4384 prog->priv = priv;
4385 prog->clear_priv = clear_priv;
4386 return 0;
4387}
4388
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004389void *bpf_program__priv(const struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004390{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03004391 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004392}
4393
Jakub Kicinski9aba3612018-06-28 14:41:37 -07004394void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
4395{
4396 prog->prog_ifindex = ifindex;
4397}
4398
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004399const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004400{
4401 const char *title;
4402
4403 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09004404 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004405 title = strdup(title);
4406 if (!title) {
4407 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00004408 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004409 }
4410 }
4411
4412 return title;
4413}
4414
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004415int bpf_program__fd(const struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004416{
Wang Nanb5805632015-11-16 12:10:09 +00004417 return bpf_program__nth_fd(prog, 0);
4418}
4419
4420int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
4421 bpf_program_prep_t prep)
4422{
4423 int *instances_fds;
4424
4425 if (nr_instances <= 0 || !prep)
4426 return -EINVAL;
4427
4428 if (prog->instances.nr > 0 || prog->instances.fds) {
4429 pr_warning("Can't set pre-processor after loading\n");
4430 return -EINVAL;
4431 }
4432
4433 instances_fds = malloc(sizeof(int) * nr_instances);
4434 if (!instances_fds) {
4435 pr_warning("alloc memory failed for fds\n");
4436 return -ENOMEM;
4437 }
4438
4439	/* fill all fds with -1 */
4440 memset(instances_fds, -1, sizeof(int) * nr_instances);
4441
4442 prog->instances.nr = nr_instances;
4443 prog->instances.fds = instances_fds;
4444 prog->preprocessor = prep;
4445 return 0;
4446}
4447
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004448int bpf_program__nth_fd(const struct bpf_program *prog, int n)
Wang Nanb5805632015-11-16 12:10:09 +00004449{
4450 int fd;
4451
Jakub Kicinski1e960042018-07-26 14:32:18 -07004452 if (!prog)
4453 return -EINVAL;
4454
Wang Nanb5805632015-11-16 12:10:09 +00004455 if (n >= prog->instances.nr || n < 0) {
4456 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
4457 n, prog->section_name, prog->instances.nr);
4458 return -EINVAL;
4459 }
4460
4461 fd = prog->instances.fds[n];
4462 if (fd < 0) {
4463 pr_warning("%dth instance of program '%s' is invalid\n",
4464 n, prog->section_name);
4465 return -ENOENT;
4466 }
4467
4468 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004469}
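
/*
 * Usage sketch (illustrative, not part of libbpf): a minimal pre-processor
 * that keeps the original instructions and loads two instances of one
 * program, then retrieves the second instance's fd. The callback layout
 * follows bpf_program_prep_t and struct bpf_prog_prep_result as declared
 * in libbpf.h; example_prep() and the instance count are made up.
 */
#if 0
static int example_prep(struct bpf_program *prog, int n,
			struct bpf_insn *insns, int insns_cnt,
			struct bpf_prog_prep_result *res)
{
	/* instance n: load the original instructions unmodified */
	res->new_insn_ptr = insns;
	res->new_insn_cnt = insns_cnt;
	res->pfd = NULL;
	return 0;
}

static int example_two_instances(struct bpf_program *prog)
{
	int err;

	/* must be called before bpf_object__load() */
	err = bpf_program__set_prep(prog, 2, example_prep);
	if (err)
		return err;
	/* ... bpf_object__load() the owning object here ... */
	return bpf_program__nth_fd(prog, 1);
}
#endif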
Wang Nan9d759a92015-11-27 08:47:35 +00004470
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07004471void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00004472{
4473 prog->type = type;
4474}
4475
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004476static bool bpf_program__is_type(const struct bpf_program *prog,
Wang Nan5f44e4c82016-07-13 10:44:01 +00004477 enum bpf_prog_type type)
4478{
4479 return prog ? (prog->type == type) : false;
4480}
4481
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004482#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
4483int bpf_program__set_##NAME(struct bpf_program *prog) \
4484{ \
4485 if (!prog) \
4486 return -EINVAL; \
4487 bpf_program__set_type(prog, TYPE); \
4488 return 0; \
4489} \
4490 \
4491bool bpf_program__is_##NAME(const struct bpf_program *prog) \
4492{ \
4493 return bpf_program__is_type(prog, TYPE); \
4494} \
Wang Nan5f44e4c82016-07-13 10:44:01 +00004495
Joe Stringer7803ba72017-01-22 17:11:24 -08004496BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
Joe Stringered794072017-01-22 17:11:23 -08004497BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
Joe Stringer7803ba72017-01-22 17:11:24 -08004498BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
4499BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
Joe Stringered794072017-01-22 17:11:23 -08004500BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
Andrey Ignatove14c93fd2018-04-17 10:28:46 -07004501BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
Joe Stringer7803ba72017-01-22 17:11:24 -08004502BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
4503BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00004504
John Fastabend16962b22018-04-23 14:30:38 -07004505void bpf_program__set_expected_attach_type(struct bpf_program *prog,
4506 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004507{
4508 prog->expected_attach_type = type;
4509}
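
/*
 * Usage sketch (illustrative, not part of libbpf): forcing program and
 * expected attach type by hand before load, equivalent to what the
 * "cgroup/connect4" section name in the table below would auto-select.
 */
#if 0
	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
	bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET4_CONNECT);
#endif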
4510
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004511#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
4512 { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004513
Andrey Ignatov956b6202018-09-26 15:24:53 -07004514/* Programs that can NOT be attached. */
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004515#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004516
Andrey Ignatov956b6202018-09-26 15:24:53 -07004517/* Programs that can be attached. */
4518#define BPF_APROG_SEC(string, ptype, atype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004519 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
Andrey Ignatov81efee72018-04-17 10:28:45 -07004520
Andrey Ignatov956b6202018-09-26 15:24:53 -07004521/* Programs that must specify expected attach type at load time. */
4522#define BPF_EAPROG_SEC(string, ptype, eatype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004523 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
4524
4525/* Programs that use BTF to identify attach point */
4526#define BPF_PROG_BTF(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 1, 0)
Andrey Ignatov956b6202018-09-26 15:24:53 -07004527
4528/* Programs that can be attached but attach type can't be identified by section
4529 * name. Kept for backward compatibility.
4530 */
4531#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07004532
Roman Gushchin583c9002017-12-13 15:18:51 +00004533static const struct {
4534 const char *sec;
4535 size_t len;
4536 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004537 enum bpf_attach_type expected_attach_type;
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004538 bool is_attachable;
4539 bool is_attach_btf;
Andrey Ignatov956b6202018-09-26 15:24:53 -07004540 enum bpf_attach_type attach_type;
Roman Gushchin583c9002017-12-13 15:18:51 +00004541} section_names[] = {
Andrey Ignatov956b6202018-09-26 15:24:53 -07004542 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
4543 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
4544 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
4545 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
4546 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
4547 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
4548 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004549 BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004550 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
4551 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
4552 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
4553 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
4554 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
4555 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
Andrey Ignatovbafa7af2018-09-26 15:24:54 -07004556 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
4557 BPF_CGROUP_INET_INGRESS),
4558 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
4559 BPF_CGROUP_INET_EGRESS),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004560 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
4561 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
4562 BPF_CGROUP_INET_SOCK_CREATE),
4563 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
4564 BPF_CGROUP_INET4_POST_BIND),
4565 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
4566 BPF_CGROUP_INET6_POST_BIND),
4567 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
4568 BPF_CGROUP_DEVICE),
4569 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
4570 BPF_CGROUP_SOCK_OPS),
Andrey Ignatovc6f68512018-09-26 15:24:55 -07004571 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
4572 BPF_SK_SKB_STREAM_PARSER),
4573 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
4574 BPF_SK_SKB_STREAM_VERDICT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004575 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
4576 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
4577 BPF_SK_MSG_VERDICT),
4578 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
4579 BPF_LIRC_MODE2),
4580 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
4581 BPF_FLOW_DISSECTOR),
4582 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4583 BPF_CGROUP_INET4_BIND),
4584 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4585 BPF_CGROUP_INET6_BIND),
4586 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4587 BPF_CGROUP_INET4_CONNECT),
4588 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4589 BPF_CGROUP_INET6_CONNECT),
4590 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4591 BPF_CGROUP_UDP4_SENDMSG),
4592 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4593 BPF_CGROUP_UDP6_SENDMSG),
Daniel Borkmann9bb59ac2019-06-07 01:48:59 +02004594 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4595 BPF_CGROUP_UDP4_RECVMSG),
4596 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4597 BPF_CGROUP_UDP6_RECVMSG),
Andrey Ignatov063cc9f2019-03-08 09:15:26 -08004598 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
4599 BPF_CGROUP_SYSCTL),
Stanislav Fomichev4cdbfb52019-06-27 13:38:49 -07004600 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
4601 BPF_CGROUP_GETSOCKOPT),
4602 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
4603 BPF_CGROUP_SETSOCKOPT),
Roman Gushchin583c9002017-12-13 15:18:51 +00004604};
Roman Gushchin583c9002017-12-13 15:18:51 +00004605
Andrey Ignatov956b6202018-09-26 15:24:53 -07004606#undef BPF_PROG_SEC_IMPL
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004607#undef BPF_PROG_SEC
Andrey Ignatov956b6202018-09-26 15:24:53 -07004608#undef BPF_APROG_SEC
4609#undef BPF_EAPROG_SEC
4610#undef BPF_APROG_COMPAT
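
/*
 * Usage sketch (illustrative, not part of libbpf): the table above is keyed
 * by ELF section name, so BPF-side code placed in a matching section picks
 * up both the program type and, where applicable, the attach type. SEC()
 * here is the usual section attribute helper from bpf_helpers.h and the
 * program body is a made-up "allow everything" example.
 */
#if 0
SEC("cgroup/bind4")
int example_bind_v4(struct bpf_sock_addr *ctx)
{
	/* resolved to BPF_PROG_TYPE_CGROUP_SOCK_ADDR + BPF_CGROUP_INET4_BIND */
	return 1;
}
#endif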
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004611
Taeung Songc76e4c22019-01-21 22:06:38 +09004612#define MAX_TYPE_NAME_SIZE 32
4613
4614static char *libbpf_get_type_names(bool attach_type)
4615{
4616 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
4617 char *buf;
4618
4619 buf = malloc(len);
4620 if (!buf)
4621 return NULL;
4622
4623 buf[0] = '\0';
4624 /* Forge string buf with all available names */
4625 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4626 if (attach_type && !section_names[i].is_attachable)
4627 continue;
4628
4629 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
4630 free(buf);
4631 return NULL;
4632 }
4633 strcat(buf, " ");
4634 strcat(buf, section_names[i].sec);
4635 }
4636
4637 return buf;
4638}
4639
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004640int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
4641 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00004642{
Taeung Songc76e4c22019-01-21 22:06:38 +09004643 char *type_names;
Roman Gushchin583c9002017-12-13 15:18:51 +00004644 int i;
4645
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004646 if (!name)
4647 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00004648
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004649 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4650 if (strncmp(name, section_names[i].sec, section_names[i].len))
4651 continue;
4652 *prog_type = section_names[i].prog_type;
4653 *expected_attach_type = section_names[i].expected_attach_type;
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004654 if (section_names[i].is_attach_btf) {
4655 struct btf *btf = bpf_core_find_kernel_btf();
4656 char raw_tp_btf_name[128] = "btf_trace_";
4657 char *dst = raw_tp_btf_name + sizeof("btf_trace_") - 1;
4658 int ret;
4659
4660 if (IS_ERR(btf)) {
4661 pr_warning("vmlinux BTF is not found\n");
4662 return -EINVAL;
4663 }
4664 /* prepend "btf_trace_" prefix per kernel convention */
4665 strncat(dst, name + section_names[i].len,
4666				sizeof(raw_tp_btf_name) - sizeof("btf_trace_"));
4667 ret = btf__find_by_name(btf, raw_tp_btf_name);
4668 btf__free(btf);
4669 if (ret <= 0) {
4670 pr_warning("%s is not found in vmlinux BTF\n", dst);
4671 return -EINVAL;
4672 }
4673 *expected_attach_type = ret;
4674 }
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004675 return 0;
4676 }
Taeung Songc76e4c22019-01-21 22:06:38 +09004677 pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
4678 type_names = libbpf_get_type_names(false);
4679 if (type_names != NULL) {
4680 pr_info("supported section(type) names are:%s\n", type_names);
4681 free(type_names);
4682 }
4683
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004684 return -EINVAL;
4685}
Roman Gushchin583c9002017-12-13 15:18:51 +00004686
Andrey Ignatov956b6202018-09-26 15:24:53 -07004687int libbpf_attach_type_by_name(const char *name,
4688 enum bpf_attach_type *attach_type)
4689{
Taeung Songc76e4c22019-01-21 22:06:38 +09004690 char *type_names;
Andrey Ignatov956b6202018-09-26 15:24:53 -07004691 int i;
4692
4693 if (!name)
4694 return -EINVAL;
4695
4696 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4697 if (strncmp(name, section_names[i].sec, section_names[i].len))
4698 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07004699 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07004700 return -EINVAL;
4701 *attach_type = section_names[i].attach_type;
4702 return 0;
4703 }
Taeung Songc76e4c22019-01-21 22:06:38 +09004704 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
4705 type_names = libbpf_get_type_names(true);
4706 if (type_names != NULL) {
4707 pr_info("attachable section(type) names are:%s\n", type_names);
4708 free(type_names);
4709 }
4710
Andrey Ignatov956b6202018-09-26 15:24:53 -07004711 return -EINVAL;
4712}
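
/*
 * Usage sketch (illustrative, not part of libbpf): resolving a section name
 * to program/attach types from user space; "xdp" and "cgroup/bind4" are just
 * entries from the table above.
 */
#if 0
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_type, attach_type;

	if (!libbpf_prog_type_by_name("xdp", &prog_type, &expected_type))
		printf("xdp -> prog type %d\n", prog_type);
	/* only sections marked attachable resolve here */
	if (!libbpf_attach_type_by_name("cgroup/bind4", &attach_type))
		printf("cgroup/bind4 -> attach type %d\n", attach_type);
#endif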
4713
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004714static int
4715bpf_program__identify_section(struct bpf_program *prog,
4716 enum bpf_prog_type *prog_type,
4717 enum bpf_attach_type *expected_attach_type)
4718{
4719 return libbpf_prog_type_by_name(prog->section_name, prog_type,
4720 expected_attach_type);
Roman Gushchin583c9002017-12-13 15:18:51 +00004721}
4722
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004723int bpf_map__fd(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00004724{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03004725 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00004726}
4727
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004728const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00004729{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03004730 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00004731}
4732
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004733const char *bpf_map__name(const struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00004734{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03004735 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00004736}
4737
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07004738__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004739{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07004740 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004741}
4742
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07004743__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004744{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07004745 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004746}
4747
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03004748int bpf_map__set_priv(struct bpf_map *map, void *priv,
4749 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00004750{
4751 if (!map)
4752 return -EINVAL;
4753
4754 if (map->priv) {
4755 if (map->clear_priv)
4756 map->clear_priv(map, map->priv);
4757 }
4758
4759 map->priv = priv;
4760 map->clear_priv = clear_priv;
4761 return 0;
4762}
4763
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004764void *bpf_map__priv(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00004765{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03004766 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00004767}
4768
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004769bool bpf_map__is_offload_neutral(const struct bpf_map *map)
Jakub Kicinskif83fb222018-07-10 14:43:01 -07004770{
4771 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
4772}
4773
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004774bool bpf_map__is_internal(const struct bpf_map *map)
Daniel Borkmannd8599002019-04-09 23:20:13 +02004775{
4776 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
4777}
4778
Jakub Kicinski9aba3612018-06-28 14:41:37 -07004779void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
4780{
4781 map->map_ifindex = ifindex;
4782}
4783
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08004784int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
4785{
4786 if (!bpf_map_type__is_map_in_map(map->def.type)) {
4787 pr_warning("error: unsupported map type\n");
4788 return -EINVAL;
4789 }
4790 if (map->inner_map_fd != -1) {
4791 pr_warning("error: inner_map_fd already specified\n");
4792 return -EINVAL;
4793 }
4794 map->inner_map_fd = fd;
4795 return 0;
4796}
4797
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004798static struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004799__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00004800{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004801 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00004802 struct bpf_map *s, *e;
4803
4804 if (!obj || !obj->maps)
4805 return NULL;
4806
4807 s = obj->maps;
4808 e = obj->maps + obj->nr_maps;
4809
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004810 if ((m < s) || (m >= e)) {
Wang Nan9d759a92015-11-27 08:47:35 +00004811 pr_warning("error in %s: map handler doesn't belong to object\n",
4812 __func__);
4813 return NULL;
4814 }
4815
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004816 idx = (m - obj->maps) + i;
4817 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00004818 return NULL;
4819 return &obj->maps[idx];
4820}
Wang Nan561bbcc2015-11-27 08:47:36 +00004821
4822struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004823bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004824{
4825 if (prev == NULL)
4826 return obj->maps;
4827
4828 return __bpf_map__iter(prev, obj, 1);
4829}
4830
4831struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004832bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004833{
4834 if (next == NULL) {
4835 if (!obj->nr_maps)
4836 return NULL;
4837 return obj->maps + obj->nr_maps - 1;
4838 }
4839
4840 return __bpf_map__iter(next, obj, -1);
4841}
4842
4843struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004844bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00004845{
4846 struct bpf_map *pos;
4847
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08004848 bpf_object__for_each_map(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00004849 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00004850 return pos;
4851 }
4852 return NULL;
4853}
Wang Nan5a6acad2016-11-26 07:03:27 +00004854
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01004855int
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004856bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01004857{
4858 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
4859}
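
/*
 * Usage sketch (illustrative, not part of libbpf): looking up a map fd by
 * name and updating one element. The map name "my_map" is an assumption;
 * bpf_map_update_elem() and BPF_ANY come from bpf.h / linux/bpf.h.
 */
#if 0
static int example_update_map(struct bpf_object *obj)
{
	__u32 key = 0, value = 42;
	int map_fd;

	map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
	if (map_fd < 0)
		return map_fd;
	return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
}
#endif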
4860
Wang Nan5a6acad2016-11-26 07:03:27 +00004861struct bpf_map *
4862bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
4863{
Andrii Nakryikodb488142019-06-17 12:26:54 -07004864 return ERR_PTR(-ENOTSUP);
Wang Nan5a6acad2016-11-26 07:03:27 +00004865}
Joe Stringere28ff1a2017-01-22 17:11:25 -08004866
4867long libbpf_get_error(const void *ptr)
4868{
Hariprasad Kelamd98363b2019-05-25 14:32:57 +05304869 return PTR_ERR_OR_ZERO(ptr);
Joe Stringere28ff1a2017-01-22 17:11:25 -08004870}
John Fastabend6f6d33f2017-08-15 22:34:22 -07004871
4872int bpf_prog_load(const char *file, enum bpf_prog_type type,
4873 struct bpf_object **pobj, int *prog_fd)
4874{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004875 struct bpf_prog_load_attr attr;
4876
4877 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
4878 attr.file = file;
4879 attr.prog_type = type;
4880 attr.expected_attach_type = 0;
4881
4882 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
4883}
4884
4885int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
4886 struct bpf_object **pobj, int *prog_fd)
4887{
Leo Yan33bae182019-07-02 18:25:31 +08004888 struct bpf_object_open_attr open_attr = {};
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004889 struct bpf_program *prog, *first_prog = NULL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004890 enum bpf_attach_type expected_attach_type;
4891 enum bpf_prog_type prog_type;
John Fastabend6f6d33f2017-08-15 22:34:22 -07004892 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07004893 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07004894 int err;
4895
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004896 if (!attr)
4897 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07004898 if (!attr->file)
4899 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004900
Leo Yan33bae182019-07-02 18:25:31 +08004901 open_attr.file = attr->file;
4902 open_attr.prog_type = attr->prog_type;
4903
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07004904 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07004905 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07004906 return -ENOENT;
4907
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004908 bpf_object__for_each_program(prog, obj) {
4909 /*
4910 * If type is not specified, try to guess it based on
4911 * section name.
4912 */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004913 prog_type = attr->prog_type;
David Beckettf0307a72018-05-16 14:02:49 -07004914 prog->prog_ifindex = attr->ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004915 expected_attach_type = attr->expected_attach_type;
4916 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07004917 err = bpf_program__identify_section(prog, &prog_type,
4918 &expected_attach_type);
4919 if (err < 0) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004920 bpf_object__close(obj);
4921 return -EINVAL;
4922 }
4923 }
4924
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004925 bpf_program__set_type(prog, prog_type);
4926 bpf_program__set_expected_attach_type(prog,
4927 expected_attach_type);
4928
Alexei Starovoitovda11b412019-04-01 21:27:47 -07004929 prog->log_level = attr->log_level;
Jiong Wang04656192019-05-24 23:25:19 +01004930 prog->prog_flags = attr->prog_flags;
Taeung Song69495d22018-09-03 08:30:07 +09004931 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004932 first_prog = prog;
4933 }
4934
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08004935 bpf_object__for_each_map(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07004936 if (!bpf_map__is_offload_neutral(map))
4937 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07004938 }
4939
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004940 if (!first_prog) {
4941 pr_warning("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07004942 bpf_object__close(obj);
4943 return -ENOENT;
4944 }
4945
John Fastabend6f6d33f2017-08-15 22:34:22 -07004946 err = bpf_object__load(obj);
4947 if (err) {
4948 bpf_object__close(obj);
4949 return -EINVAL;
4950 }
4951
4952 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004953 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07004954 return 0;
4955}
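
/*
 * Usage sketch (illustrative, not part of libbpf samples): the common
 * open+load helper. Passing BPF_PROG_TYPE_UNSPEC lets the section name
 * guessing above pick the type; "./myprog.o" is a made-up path.
 */
#if 0
static int example_load(void)
{
	struct bpf_object *obj;
	int prog_fd, err;

	err = bpf_prog_load("./myprog.o", BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd);
	if (err)
		return err;
	/* ... use prog_fd, e.g. attach it ... */
	bpf_object__close(obj);
	return 0;
}
#endif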
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07004956
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07004957struct bpf_link {
4958 int (*destroy)(struct bpf_link *link);
4959};
4960
4961int bpf_link__destroy(struct bpf_link *link)
4962{
4963 int err;
4964
4965 if (!link)
4966 return 0;
4967
4968 err = link->destroy(link);
4969 free(link);
4970
4971 return err;
4972}
4973
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07004974struct bpf_link_fd {
4975 struct bpf_link link; /* has to be at the top of struct */
4976 int fd; /* hook FD */
4977};
4978
4979static int bpf_link__destroy_perf_event(struct bpf_link *link)
4980{
4981 struct bpf_link_fd *l = (void *)link;
4982 int err;
4983
4984 err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
4985 if (err)
4986 err = -errno;
4987
4988 close(l->fd);
4989 return err;
4990}
4991
4992struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
4993 int pfd)
4994{
4995 char errmsg[STRERR_BUFSIZE];
4996 struct bpf_link_fd *link;
4997 int prog_fd, err;
4998
4999 if (pfd < 0) {
5000 pr_warning("program '%s': invalid perf event FD %d\n",
5001 bpf_program__title(prog, false), pfd);
5002 return ERR_PTR(-EINVAL);
5003 }
5004 prog_fd = bpf_program__fd(prog);
5005 if (prog_fd < 0) {
5006 pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
5007 bpf_program__title(prog, false));
5008 return ERR_PTR(-EINVAL);
5009 }
5010
5011 link = malloc(sizeof(*link));
5012 if (!link)
5013 return ERR_PTR(-ENOMEM);
5014 link->link.destroy = &bpf_link__destroy_perf_event;
5015 link->fd = pfd;
5016
5017 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
5018 err = -errno;
5019 free(link);
5020 pr_warning("program '%s': failed to attach to pfd %d: %s\n",
5021 bpf_program__title(prog, false), pfd,
5022 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5023 return ERR_PTR(err);
5024 }
5025 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
5026 err = -errno;
5027 free(link);
5028 pr_warning("program '%s': failed to enable pfd %d: %s\n",
5029 bpf_program__title(prog, false), pfd,
5030 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5031 return ERR_PTR(err);
5032 }
5033 return (struct bpf_link *)link;
5034}
5035
Andrii Nakryikob2650022019-07-01 16:58:58 -07005036/*
5037 * This function is expected to parse an integer in the range of [0, 2^31-1]
5038 * from the given file using scanf format string fmt. If the actual parsed
5039 * value is negative, the result might be indistinguishable from an error.
5040 */
5041static int parse_uint_from_file(const char *file, const char *fmt)
5042{
5043 char buf[STRERR_BUFSIZE];
5044 int err, ret;
5045 FILE *f;
5046
5047 f = fopen(file, "r");
5048 if (!f) {
5049 err = -errno;
5050 pr_debug("failed to open '%s': %s\n", file,
5051 libbpf_strerror_r(err, buf, sizeof(buf)));
5052 return err;
5053 }
5054 err = fscanf(f, fmt, &ret);
5055 if (err != 1) {
5056 err = err == EOF ? -EIO : -errno;
5057 pr_debug("failed to parse '%s': %s\n", file,
5058 libbpf_strerror_r(err, buf, sizeof(buf)));
5059 fclose(f);
5060 return err;
5061 }
5062 fclose(f);
5063 return ret;
5064}
5065
5066static int determine_kprobe_perf_type(void)
5067{
5068 const char *file = "/sys/bus/event_source/devices/kprobe/type";
5069
5070 return parse_uint_from_file(file, "%d\n");
5071}
5072
5073static int determine_uprobe_perf_type(void)
5074{
5075 const char *file = "/sys/bus/event_source/devices/uprobe/type";
5076
5077 return parse_uint_from_file(file, "%d\n");
5078}
5079
5080static int determine_kprobe_retprobe_bit(void)
5081{
5082 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
5083
5084 return parse_uint_from_file(file, "config:%d\n");
5085}
5086
5087static int determine_uprobe_retprobe_bit(void)
5088{
5089 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
5090
5091 return parse_uint_from_file(file, "config:%d\n");
5092}
5093
5094static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5095 uint64_t offset, int pid)
5096{
5097 struct perf_event_attr attr = {};
5098 char errmsg[STRERR_BUFSIZE];
5099 int type, pfd, err;
5100
5101 type = uprobe ? determine_uprobe_perf_type()
5102 : determine_kprobe_perf_type();
5103 if (type < 0) {
5104 pr_warning("failed to determine %s perf type: %s\n",
5105 uprobe ? "uprobe" : "kprobe",
5106 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
5107 return type;
5108 }
5109 if (retprobe) {
5110 int bit = uprobe ? determine_uprobe_retprobe_bit()
5111 : determine_kprobe_retprobe_bit();
5112
5113 if (bit < 0) {
5114 pr_warning("failed to determine %s retprobe bit: %s\n",
5115 uprobe ? "uprobe" : "kprobe",
5116 libbpf_strerror_r(bit, errmsg,
5117 sizeof(errmsg)));
5118 return bit;
5119 }
5120 attr.config |= 1 << bit;
5121 }
5122 attr.size = sizeof(attr);
5123 attr.type = type;
Andrii Nakryiko36db2a92019-07-08 21:00:07 -07005124 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
5125 attr.config2 = offset; /* kprobe_addr or probe_offset */
Andrii Nakryikob2650022019-07-01 16:58:58 -07005126
5127 /* pid filter is meaningful only for uprobes */
5128 pfd = syscall(__NR_perf_event_open, &attr,
5129 pid < 0 ? -1 : pid /* pid */,
5130 pid == -1 ? 0 : -1 /* cpu */,
5131 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5132 if (pfd < 0) {
5133 err = -errno;
5134 pr_warning("%s perf_event_open() failed: %s\n",
5135 uprobe ? "uprobe" : "kprobe",
5136 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5137 return err;
5138 }
5139 return pfd;
5140}
5141
5142struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
5143 bool retprobe,
5144 const char *func_name)
5145{
5146 char errmsg[STRERR_BUFSIZE];
5147 struct bpf_link *link;
5148 int pfd, err;
5149
5150 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
5151 0 /* offset */, -1 /* pid */);
5152 if (pfd < 0) {
5153 pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
5154 bpf_program__title(prog, false),
5155 retprobe ? "kretprobe" : "kprobe", func_name,
5156 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5157 return ERR_PTR(pfd);
5158 }
5159 link = bpf_program__attach_perf_event(prog, pfd);
5160 if (IS_ERR(link)) {
5161 close(pfd);
5162 err = PTR_ERR(link);
5163 pr_warning("program '%s': failed to attach to %s '%s': %s\n",
5164 bpf_program__title(prog, false),
5165 retprobe ? "kretprobe" : "kprobe", func_name,
5166 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5167 return link;
5168 }
5169 return link;
5170}
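
/*
 * Usage sketch (illustrative, not part of libbpf): attaching a loaded
 * program to a kprobe. "do_sys_open" is only an example symbol; error
 * checking uses libbpf_get_error() since attach returns ERR_PTR values.
 */
#if 0
static struct bpf_link *example_attach_kprobe(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_kprobe(prog, false /* !retprobe */,
					  "do_sys_open");
	if (libbpf_get_error(link))
		return NULL;
	/* detach later with bpf_link__destroy(link) */
	return link;
}
#endif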
5171
5172struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
5173 bool retprobe, pid_t pid,
5174 const char *binary_path,
5175 size_t func_offset)
5176{
5177 char errmsg[STRERR_BUFSIZE];
5178 struct bpf_link *link;
5179 int pfd, err;
5180
5181 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
5182 binary_path, func_offset, pid);
5183 if (pfd < 0) {
5184 pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
5185 bpf_program__title(prog, false),
5186 retprobe ? "uretprobe" : "uprobe",
5187 binary_path, func_offset,
5188 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5189 return ERR_PTR(pfd);
5190 }
5191 link = bpf_program__attach_perf_event(prog, pfd);
5192 if (IS_ERR(link)) {
5193 close(pfd);
5194 err = PTR_ERR(link);
5195 pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
5196 bpf_program__title(prog, false),
5197 retprobe ? "uretprobe" : "uprobe",
5198 binary_path, func_offset,
5199 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5200 return link;
5201 }
5202 return link;
5203}
5204
Andrii Nakryikof6de59c2019-07-01 16:58:59 -07005205static int determine_tracepoint_id(const char *tp_category,
5206 const char *tp_name)
5207{
5208 char file[PATH_MAX];
5209 int ret;
5210
5211 ret = snprintf(file, sizeof(file),
5212 "/sys/kernel/debug/tracing/events/%s/%s/id",
5213 tp_category, tp_name);
5214 if (ret < 0)
5215 return -errno;
5216 if (ret >= sizeof(file)) {
5217 pr_debug("tracepoint %s/%s path is too long\n",
5218 tp_category, tp_name);
5219 return -E2BIG;
5220 }
5221 return parse_uint_from_file(file, "%d\n");
5222}
5223
5224static int perf_event_open_tracepoint(const char *tp_category,
5225 const char *tp_name)
5226{
5227 struct perf_event_attr attr = {};
5228 char errmsg[STRERR_BUFSIZE];
5229 int tp_id, pfd, err;
5230
5231 tp_id = determine_tracepoint_id(tp_category, tp_name);
5232 if (tp_id < 0) {
5233 pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
5234 tp_category, tp_name,
5235 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
5236 return tp_id;
5237 }
5238
5239 attr.type = PERF_TYPE_TRACEPOINT;
5240 attr.size = sizeof(attr);
5241 attr.config = tp_id;
5242
5243 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
5244 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5245 if (pfd < 0) {
5246 err = -errno;
5247 pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n",
5248 tp_category, tp_name,
5249 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5250 return err;
5251 }
5252 return pfd;
5253}
5254
5255struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
5256 const char *tp_category,
5257 const char *tp_name)
5258{
5259 char errmsg[STRERR_BUFSIZE];
5260 struct bpf_link *link;
5261 int pfd, err;
5262
5263 pfd = perf_event_open_tracepoint(tp_category, tp_name);
5264 if (pfd < 0) {
5265 pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
5266 bpf_program__title(prog, false),
5267 tp_category, tp_name,
5268 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5269 return ERR_PTR(pfd);
5270 }
5271 link = bpf_program__attach_perf_event(prog, pfd);
5272 if (IS_ERR(link)) {
5273 close(pfd);
5274 err = PTR_ERR(link);
5275 pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
5276 bpf_program__title(prog, false),
5277 tp_category, tp_name,
5278 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5279 return link;
5280 }
5281 return link;
5282}
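
/*
 * Usage sketch (illustrative, not part of libbpf): attaching to a classic
 * tracepoint; "syscalls"/"sys_enter_openat" are example category and name.
 */
#if 0
	link = bpf_program__attach_tracepoint(prog, "syscalls",
					      "sys_enter_openat");
	if (libbpf_get_error(link))
		return -1;
#endif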
5283
Andrii Nakryiko84bf5e12019-07-01 16:59:00 -07005284static int bpf_link__destroy_fd(struct bpf_link *link)
5285{
5286 struct bpf_link_fd *l = (void *)link;
5287
5288 return close(l->fd);
5289}
5290
5291struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
5292 const char *tp_name)
5293{
5294 char errmsg[STRERR_BUFSIZE];
5295 struct bpf_link_fd *link;
5296 int prog_fd, pfd;
5297
5298 prog_fd = bpf_program__fd(prog);
5299 if (prog_fd < 0) {
5300 pr_warning("program '%s': can't attach before loaded\n",
5301 bpf_program__title(prog, false));
5302 return ERR_PTR(-EINVAL);
5303 }
5304
5305 link = malloc(sizeof(*link));
5306 if (!link)
5307 return ERR_PTR(-ENOMEM);
5308 link->link.destroy = &bpf_link__destroy_fd;
5309
5310 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
5311 if (pfd < 0) {
5312 pfd = -errno;
5313 free(link);
5314 pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
5315 bpf_program__title(prog, false), tp_name,
5316 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5317 return ERR_PTR(pfd);
5318 }
5319 link->fd = pfd;
5320 return (struct bpf_link *)link;
5321}
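
/*
 * Usage sketch (illustrative, not part of libbpf): raw tracepoints skip the
 * perf event layer entirely; "sched_switch" is an example tracepoint name.
 */
#if 0
	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
	if (libbpf_get_error(link))
		return -1;
#endif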
5322
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005323enum bpf_perf_event_ret
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005324bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
5325 void **copy_mem, size_t *copy_size,
5326 bpf_perf_event_print_t fn, void *private_data)
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005327{
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005328 struct perf_event_mmap_page *header = mmap_mem;
Daniel Borkmanna64af0e2018-10-19 15:51:03 +02005329 __u64 data_head = ring_buffer_read_head(header);
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005330 __u64 data_tail = header->data_tail;
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005331 void *base = ((__u8 *)header) + page_size;
5332 int ret = LIBBPF_PERF_EVENT_CONT;
5333 struct perf_event_header *ehdr;
5334 size_t ehdr_size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005335
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005336 while (data_head != data_tail) {
5337 ehdr = base + (data_tail & (mmap_size - 1));
5338 ehdr_size = ehdr->size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005339
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005340 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
5341 void *copy_start = ehdr;
5342 size_t len_first = base + mmap_size - copy_start;
5343 size_t len_secnd = ehdr_size - len_first;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005344
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005345 if (*copy_size < ehdr_size) {
5346 free(*copy_mem);
5347 *copy_mem = malloc(ehdr_size);
5348 if (!*copy_mem) {
5349 *copy_size = 0;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005350 ret = LIBBPF_PERF_EVENT_ERROR;
5351 break;
5352 }
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005353 *copy_size = ehdr_size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005354 }
5355
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005356 memcpy(*copy_mem, copy_start, len_first);
5357 memcpy(*copy_mem + len_first, base, len_secnd);
5358 ehdr = *copy_mem;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005359 }
5360
Daniel Borkmann3dca2112018-10-21 02:09:28 +02005361 ret = fn(ehdr, private_data);
5362 data_tail += ehdr_size;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005363 if (ret != LIBBPF_PERF_EVENT_CONT)
5364 break;
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005365 }
5366
Daniel Borkmanna64af0e2018-10-19 15:51:03 +02005367 ring_buffer_write_tail(header, data_tail);
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005368 return ret;
5369}
Song Liu34be16462019-03-11 22:30:38 -07005370
Andrii Nakryikofb84b822019-07-06 11:06:24 -07005371struct perf_buffer;
5372
5373struct perf_buffer_params {
5374 struct perf_event_attr *attr;
5375	/* if event_cb is specified, it takes precedence */
5376 perf_buffer_event_fn event_cb;
5377 /* sample_cb and lost_cb are higher-level common-case callbacks */
5378 perf_buffer_sample_fn sample_cb;
5379 perf_buffer_lost_fn lost_cb;
5380 void *ctx;
5381 int cpu_cnt;
5382 int *cpus;
5383 int *map_keys;
5384};
5385
5386struct perf_cpu_buf {
5387 struct perf_buffer *pb;
5388 void *base; /* mmap()'ed memory */
5389 void *buf; /* for reconstructing segmented data */
5390 size_t buf_size;
5391 int fd;
5392 int cpu;
5393 int map_key;
5394};
5395
5396struct perf_buffer {
5397 perf_buffer_event_fn event_cb;
5398 perf_buffer_sample_fn sample_cb;
5399 perf_buffer_lost_fn lost_cb;
5400 void *ctx; /* passed into callbacks */
5401
5402 size_t page_size;
5403 size_t mmap_size;
5404 struct perf_cpu_buf **cpu_bufs;
5405 struct epoll_event *events;
5406 int cpu_cnt;
5407 int epoll_fd; /* perf event FD */
5408 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
5409};
5410
5411static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
5412 struct perf_cpu_buf *cpu_buf)
5413{
5414 if (!cpu_buf)
5415 return;
5416 if (cpu_buf->base &&
5417 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
5418 pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
5419 if (cpu_buf->fd >= 0) {
5420 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
5421 close(cpu_buf->fd);
5422 }
5423 free(cpu_buf->buf);
5424 free(cpu_buf);
5425}
5426
5427void perf_buffer__free(struct perf_buffer *pb)
5428{
5429 int i;
5430
5431 if (!pb)
5432 return;
5433 if (pb->cpu_bufs) {
5434 for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
5435 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
5436
5437 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
5438 perf_buffer__free_cpu_buf(pb, cpu_buf);
5439 }
5440 free(pb->cpu_bufs);
5441 }
5442 if (pb->epoll_fd >= 0)
5443 close(pb->epoll_fd);
5444 free(pb->events);
5445 free(pb);
5446}
5447
5448static struct perf_cpu_buf *
5449perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
5450 int cpu, int map_key)
5451{
5452 struct perf_cpu_buf *cpu_buf;
5453 char msg[STRERR_BUFSIZE];
5454 int err;
5455
5456 cpu_buf = calloc(1, sizeof(*cpu_buf));
5457 if (!cpu_buf)
5458 return ERR_PTR(-ENOMEM);
5459
5460 cpu_buf->pb = pb;
5461 cpu_buf->cpu = cpu;
5462 cpu_buf->map_key = map_key;
5463
5464 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
5465 -1, PERF_FLAG_FD_CLOEXEC);
5466 if (cpu_buf->fd < 0) {
5467 err = -errno;
5468 pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
5469 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5470 goto error;
5471 }
5472
5473 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
5474 PROT_READ | PROT_WRITE, MAP_SHARED,
5475 cpu_buf->fd, 0);
5476 if (cpu_buf->base == MAP_FAILED) {
5477 cpu_buf->base = NULL;
5478 err = -errno;
5479 pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
5480 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5481 goto error;
5482 }
5483
5484 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
5485 err = -errno;
5486 pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
5487 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5488 goto error;
5489 }
5490
5491 return cpu_buf;
5492
5493error:
5494 perf_buffer__free_cpu_buf(pb, cpu_buf);
5495 return (struct perf_cpu_buf *)ERR_PTR(err);
5496}
5497
5498static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5499 struct perf_buffer_params *p);
5500
5501struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
5502 const struct perf_buffer_opts *opts)
5503{
5504 struct perf_buffer_params p = {};
Arnaldo Carvalho de Melo4be6e052019-07-19 11:34:07 -03005505 struct perf_event_attr attr = { 0, };
5506
5507	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
5508 attr.type = PERF_TYPE_SOFTWARE;
5509 attr.sample_type = PERF_SAMPLE_RAW;
5510 attr.sample_period = 1;
5511 attr.wakeup_events = 1;
Andrii Nakryikofb84b822019-07-06 11:06:24 -07005512
5513 p.attr = &attr;
5514 p.sample_cb = opts ? opts->sample_cb : NULL;
5515 p.lost_cb = opts ? opts->lost_cb : NULL;
5516 p.ctx = opts ? opts->ctx : NULL;
5517
5518 return __perf_buffer__new(map_fd, page_cnt, &p);
5519}
5520
5521struct perf_buffer *
5522perf_buffer__new_raw(int map_fd, size_t page_cnt,
5523 const struct perf_buffer_raw_opts *opts)
5524{
5525 struct perf_buffer_params p = {};
5526
5527 p.attr = opts->attr;
5528 p.event_cb = opts->event_cb;
5529 p.ctx = opts->ctx;
5530 p.cpu_cnt = opts->cpu_cnt;
5531 p.cpus = opts->cpus;
5532 p.map_keys = opts->map_keys;
5533
5534 return __perf_buffer__new(map_fd, page_cnt, &p);
5535}
5536
5537static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5538 struct perf_buffer_params *p)
5539{
5540 struct bpf_map_info map = {};
5541 char msg[STRERR_BUFSIZE];
5542 struct perf_buffer *pb;
5543 __u32 map_info_len;
5544 int err, i;
5545
5546 if (page_cnt & (page_cnt - 1)) {
5547 pr_warning("page count should be power of two, but is %zu\n",
5548 page_cnt);
5549 return ERR_PTR(-EINVAL);
5550 }
5551
5552 map_info_len = sizeof(map);
5553 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
5554 if (err) {
5555 err = -errno;
5556 pr_warning("failed to get map info for map FD %d: %s\n",
5557 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
5558 return ERR_PTR(err);
5559 }
5560
5561 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
5562 pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
5563 map.name);
5564 return ERR_PTR(-EINVAL);
5565 }
5566
5567 pb = calloc(1, sizeof(*pb));
5568 if (!pb)
5569 return ERR_PTR(-ENOMEM);
5570
5571 pb->event_cb = p->event_cb;
5572 pb->sample_cb = p->sample_cb;
5573 pb->lost_cb = p->lost_cb;
5574 pb->ctx = p->ctx;
5575
5576 pb->page_size = getpagesize();
5577 pb->mmap_size = pb->page_size * page_cnt;
5578 pb->map_fd = map_fd;
5579
5580 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
5581 if (pb->epoll_fd < 0) {
5582 err = -errno;
5583 pr_warning("failed to create epoll instance: %s\n",
5584 libbpf_strerror_r(err, msg, sizeof(msg)));
5585 goto error;
5586 }
5587
5588 if (p->cpu_cnt > 0) {
5589 pb->cpu_cnt = p->cpu_cnt;
5590 } else {
5591 pb->cpu_cnt = libbpf_num_possible_cpus();
5592 if (pb->cpu_cnt < 0) {
5593 err = pb->cpu_cnt;
5594 goto error;
5595 }
5596 if (map.max_entries < pb->cpu_cnt)
5597 pb->cpu_cnt = map.max_entries;
5598 }
5599
5600 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
5601 if (!pb->events) {
5602 err = -ENOMEM;
5603 pr_warning("failed to allocate events: out of memory\n");
5604 goto error;
5605 }
5606 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
5607 if (!pb->cpu_bufs) {
5608 err = -ENOMEM;
5609 pr_warning("failed to allocate buffers: out of memory\n");
5610 goto error;
5611 }
5612
5613 for (i = 0; i < pb->cpu_cnt; i++) {
5614 struct perf_cpu_buf *cpu_buf;
5615 int cpu, map_key;
5616
5617 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
5618 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
5619
5620 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
5621 if (IS_ERR(cpu_buf)) {
5622 err = PTR_ERR(cpu_buf);
5623 goto error;
5624 }
5625
5626 pb->cpu_bufs[i] = cpu_buf;
5627
5628 err = bpf_map_update_elem(pb->map_fd, &map_key,
5629 &cpu_buf->fd, 0);
5630 if (err) {
5631 err = -errno;
5632 pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
5633 cpu, map_key, cpu_buf->fd,
5634 libbpf_strerror_r(err, msg, sizeof(msg)));
5635 goto error;
5636 }
5637
5638 pb->events[i].events = EPOLLIN;
5639 pb->events[i].data.ptr = cpu_buf;
5640 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
5641 &pb->events[i]) < 0) {
5642 err = -errno;
5643 pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
5644 cpu, cpu_buf->fd,
5645 libbpf_strerror_r(err, msg, sizeof(msg)));
5646 goto error;
5647 }
5648 }
5649
5650 return pb;
5651
5652error:
5653 if (pb)
5654 perf_buffer__free(pb);
5655 return ERR_PTR(err);
5656}
5657
5658struct perf_sample_raw {
5659 struct perf_event_header header;
5660 uint32_t size;
5661 char data[0];
5662};
5663
5664struct perf_sample_lost {
5665 struct perf_event_header header;
5666 uint64_t id;
5667 uint64_t lost;
5668 uint64_t sample_id;
5669};
5670
5671static enum bpf_perf_event_ret
5672perf_buffer__process_record(struct perf_event_header *e, void *ctx)
5673{
5674 struct perf_cpu_buf *cpu_buf = ctx;
5675 struct perf_buffer *pb = cpu_buf->pb;
5676 void *data = e;
5677
5678 /* user wants full control over parsing perf event */
5679 if (pb->event_cb)
5680 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
5681
5682 switch (e->type) {
5683 case PERF_RECORD_SAMPLE: {
5684 struct perf_sample_raw *s = data;
5685
5686 if (pb->sample_cb)
5687 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
5688 break;
5689 }
5690 case PERF_RECORD_LOST: {
5691 struct perf_sample_lost *s = data;
5692
5693 if (pb->lost_cb)
5694 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
5695 break;
5696 }
5697 default:
5698 pr_warning("unknown perf sample type %d\n", e->type);
5699 return LIBBPF_PERF_EVENT_ERROR;
5700 }
5701 return LIBBPF_PERF_EVENT_CONT;
5702}
5703
5704static int perf_buffer__process_records(struct perf_buffer *pb,
5705 struct perf_cpu_buf *cpu_buf)
5706{
5707 enum bpf_perf_event_ret ret;
5708
5709 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
5710 pb->page_size, &cpu_buf->buf,
5711 &cpu_buf->buf_size,
5712 perf_buffer__process_record, cpu_buf);
5713 if (ret != LIBBPF_PERF_EVENT_CONT)
5714 return ret;
5715 return 0;
5716}
5717
5718int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
5719{
5720 int i, cnt, err;
5721
5722 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
5723 for (i = 0; i < cnt; i++) {
5724 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
5725
5726 err = perf_buffer__process_records(pb, cpu_buf);
5727 if (err) {
5728 pr_warning("error while processing records: %d\n", err);
5729 return err;
5730 }
5731 }
5732 return cnt < 0 ? -errno : cnt;
5733}
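
/*
 * Usage sketch (illustrative, not part of libbpf): the common-case perf
 * buffer flow with sample/lost callbacks. "map_fd" is assumed to be a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY fd; 8 pages per CPU and the 100ms poll
 * timeout are arbitrary choices.
 */
#if 0
static void example_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data/size is the raw record the BPF program emitted */
}

static void example_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "lost %llu samples on CPU #%d\n",
		(unsigned long long)cnt, cpu);
}

static int example_perf_buffer(int map_fd)
{
	struct perf_buffer_opts pb_opts = {};
	struct perf_buffer *pb;
	int err;

	pb_opts.sample_cb = example_sample;
	pb_opts.lost_cb = example_lost;
	pb = perf_buffer__new(map_fd, 8 /* pages per CPU */, &pb_opts);
	err = libbpf_get_error(pb);
	if (err)
		return err;
	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		; /* callbacks fire from inside poll */
	perf_buffer__free(pb);
	return err;
}
#endif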
5734
Song Liu34be16462019-03-11 22:30:38 -07005735struct bpf_prog_info_array_desc {
5736 int array_offset; /* e.g. offset of jited_prog_insns */
5737 int count_offset; /* e.g. offset of jited_prog_len */
5738 int size_offset; /* > 0: offset of rec size,
5739 * < 0: fix size of -size_offset
5740 */
5741};
5742
5743static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
5744 [BPF_PROG_INFO_JITED_INSNS] = {
5745 offsetof(struct bpf_prog_info, jited_prog_insns),
5746 offsetof(struct bpf_prog_info, jited_prog_len),
5747 -1,
5748 },
5749 [BPF_PROG_INFO_XLATED_INSNS] = {
5750 offsetof(struct bpf_prog_info, xlated_prog_insns),
5751 offsetof(struct bpf_prog_info, xlated_prog_len),
5752 -1,
5753 },
5754 [BPF_PROG_INFO_MAP_IDS] = {
5755 offsetof(struct bpf_prog_info, map_ids),
5756 offsetof(struct bpf_prog_info, nr_map_ids),
5757 -(int)sizeof(__u32),
5758 },
5759 [BPF_PROG_INFO_JITED_KSYMS] = {
5760 offsetof(struct bpf_prog_info, jited_ksyms),
5761 offsetof(struct bpf_prog_info, nr_jited_ksyms),
5762 -(int)sizeof(__u64),
5763 },
5764 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
5765 offsetof(struct bpf_prog_info, jited_func_lens),
5766 offsetof(struct bpf_prog_info, nr_jited_func_lens),
5767 -(int)sizeof(__u32),
5768 },
5769 [BPF_PROG_INFO_FUNC_INFO] = {
5770 offsetof(struct bpf_prog_info, func_info),
5771 offsetof(struct bpf_prog_info, nr_func_info),
5772 offsetof(struct bpf_prog_info, func_info_rec_size),
5773 },
5774 [BPF_PROG_INFO_LINE_INFO] = {
5775 offsetof(struct bpf_prog_info, line_info),
5776 offsetof(struct bpf_prog_info, nr_line_info),
5777 offsetof(struct bpf_prog_info, line_info_rec_size),
5778 },
5779 [BPF_PROG_INFO_JITED_LINE_INFO] = {
5780 offsetof(struct bpf_prog_info, jited_line_info),
5781 offsetof(struct bpf_prog_info, nr_jited_line_info),
5782 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
5783 },
5784 [BPF_PROG_INFO_PROG_TAGS] = {
5785 offsetof(struct bpf_prog_info, prog_tags),
5786 offsetof(struct bpf_prog_info, nr_prog_tags),
5787 -(int)sizeof(__u8) * BPF_TAG_SIZE,
5788 },
5789
5790};
5791
5792static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
5793{
5794 __u32 *array = (__u32 *)info;
5795
5796 if (offset >= 0)
5797 return array[offset / sizeof(__u32)];
5798 return -(int)offset;
5799}
5800
5801static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
5802{
5803 __u64 *array = (__u64 *)info;
5804
5805 if (offset >= 0)
5806 return array[offset / sizeof(__u64)];
5807 return -(int)offset;
5808}
5809
5810static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
5811 __u32 val)
5812{
5813 __u32 *array = (__u32 *)info;
5814
5815 if (offset >= 0)
5816 array[offset / sizeof(__u32)] = val;
5817}
5818
5819static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
5820 __u64 val)
5821{
5822 __u64 *array = (__u64 *)info;
5823
5824 if (offset >= 0)
5825 array[offset / sizeof(__u64)] = val;
5826}
5827
5828struct bpf_prog_info_linear *
5829bpf_program__get_prog_info_linear(int fd, __u64 arrays)
5830{
5831 struct bpf_prog_info_linear *info_linear;
5832 struct bpf_prog_info info = {};
5833 __u32 info_len = sizeof(info);
5834 __u32 data_len = 0;
5835 int i, err;
5836 void *ptr;
5837
5838 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
5839 return ERR_PTR(-EINVAL);
5840
5841 /* step 1: get array dimensions */
5842 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
5843 if (err) {
5844 pr_debug("can't get prog info: %s", strerror(errno));
5845 return ERR_PTR(-EFAULT);
5846 }
5847
5848 /* step 2: calculate total size of all arrays */
5849 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5850 bool include_array = (arrays & (1UL << i)) > 0;
5851 struct bpf_prog_info_array_desc *desc;
5852 __u32 count, size;
5853
5854 desc = bpf_prog_info_array_desc + i;
5855
5856 /* kernel is too old to support this field */
5857 if (info_len < desc->array_offset + sizeof(__u32) ||
5858 info_len < desc->count_offset + sizeof(__u32) ||
5859 (desc->size_offset > 0 && info_len < desc->size_offset))
5860 include_array = false;
5861
5862 if (!include_array) {
5863 arrays &= ~(1UL << i); /* clear the bit */
5864 continue;
5865 }
5866
5867 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5868 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5869
5870 data_len += count * size;
5871 }
5872
5873 /* step 3: allocate continuous memory */
5874 data_len = roundup(data_len, sizeof(__u64));
5875 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
5876 if (!info_linear)
5877 return ERR_PTR(-ENOMEM);
5878
5879 /* step 4: fill data to info_linear->info */
5880 info_linear->arrays = arrays;
5881 memset(&info_linear->info, 0, sizeof(info));
5882 ptr = info_linear->data;
5883
5884 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5885 struct bpf_prog_info_array_desc *desc;
5886 __u32 count, size;
5887
5888 if ((arrays & (1UL << i)) == 0)
5889 continue;
5890
5891 desc = bpf_prog_info_array_desc + i;
5892 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5893 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5894 bpf_prog_info_set_offset_u32(&info_linear->info,
5895 desc->count_offset, count);
5896 bpf_prog_info_set_offset_u32(&info_linear->info,
5897 desc->size_offset, size);
5898 bpf_prog_info_set_offset_u64(&info_linear->info,
5899 desc->array_offset,
5900 ptr_to_u64(ptr));
5901 ptr += count * size;
5902 }
5903
5904 /* step 5: call syscall again to get required arrays */
5905 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
5906 if (err) {
5907 pr_debug("can't get prog info: %s", strerror(errno));
5908 free(info_linear);
5909 return ERR_PTR(-EFAULT);
5910 }
5911
5912 /* step 6: verify the data */
5913 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5914 struct bpf_prog_info_array_desc *desc;
5915 __u32 v1, v2;
5916
5917 if ((arrays & (1UL << i)) == 0)
5918 continue;
5919
5920 desc = bpf_prog_info_array_desc + i;
5921 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5922 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5923 desc->count_offset);
5924 if (v1 != v2)
5925 pr_warning("%s: mismatch in element count\n", __func__);
5926
5927 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5928 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5929 desc->size_offset);
5930 if (v1 != v2)
5931 pr_warning("%s: mismatch in rec size\n", __func__);
5932 }
5933
5934 /* step 7: update info_len and data_len */
5935 info_linear->info_len = sizeof(struct bpf_prog_info);
5936 info_linear->data_len = data_len;
5937
5938 return info_linear;
5939}
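
/*
 * Usage sketch (illustrative, not part of libbpf): requesting one variable
 * length array (JITed instructions) in a single contiguous allocation, the
 * way perf consumes this API. The arrays bitmask is built from the
 * BPF_PROG_INFO_* enum values.
 */
#if 0
static void example_prog_info(int prog_fd)
{
	struct bpf_prog_info_linear *info_linear;

	info_linear = bpf_program__get_prog_info_linear(
			prog_fd, 1UL << BPF_PROG_INFO_JITED_INSNS);
	if (libbpf_get_error(info_linear))
		return;
	printf("jited_prog_len: %u\n", info_linear->info.jited_prog_len);
	free(info_linear);
}
#endif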
5940
5941void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
5942{
5943 int i;
5944
5945 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5946 struct bpf_prog_info_array_desc *desc;
5947 __u64 addr, offs;
5948
5949 if ((info_linear->arrays & (1UL << i)) == 0)
5950 continue;
5951
5952 desc = bpf_prog_info_array_desc + i;
5953 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
5954 desc->array_offset);
5955 offs = addr - ptr_to_u64(info_linear->data);
5956 bpf_prog_info_set_offset_u64(&info_linear->info,
5957 desc->array_offset, offs);
5958 }
5959}
5960
5961void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
5962{
5963 int i;
5964
5965 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5966 struct bpf_prog_info_array_desc *desc;
5967 __u64 addr, offs;
5968
5969 if ((info_linear->arrays & (1UL << i)) == 0)
5970 continue;
5971
5972 desc = bpf_prog_info_array_desc + i;
5973 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
5974 desc->array_offset);
5975 addr = offs + ptr_to_u64(info_linear->data);
5976 bpf_prog_info_set_offset_u64(&info_linear->info,
5977 desc->array_offset, addr);
5978 }
5979}
Hechao Li6446b312019-06-10 17:56:50 -07005980
5981int libbpf_num_possible_cpus(void)
5982{
5983 static const char *fcpu = "/sys/devices/system/cpu/possible";
5984 int len = 0, n = 0, il = 0, ir = 0;
5985 unsigned int start = 0, end = 0;
Takshak Chahande56fbc242019-07-31 15:10:55 -07005986 int tmp_cpus = 0;
Hechao Li6446b312019-06-10 17:56:50 -07005987 static int cpus;
5988 char buf[128];
5989 int error = 0;
5990 int fd = -1;
5991
Takshak Chahande56fbc242019-07-31 15:10:55 -07005992 tmp_cpus = READ_ONCE(cpus);
5993 if (tmp_cpus > 0)
5994 return tmp_cpus;
Hechao Li6446b312019-06-10 17:56:50 -07005995
5996 fd = open(fcpu, O_RDONLY);
5997 if (fd < 0) {
5998 error = errno;
5999 pr_warning("Failed to open file %s: %s\n",
6000 fcpu, strerror(error));
6001 return -error;
6002 }
6003 len = read(fd, buf, sizeof(buf));
6004 close(fd);
6005 if (len <= 0) {
6006 error = len ? errno : EINVAL;
6007 pr_warning("Failed to read # of possible cpus from %s: %s\n",
6008 fcpu, strerror(error));
6009 return -error;
6010 }
6011 if (len == sizeof(buf)) {
6012 pr_warning("File %s size overflow\n", fcpu);
6013 return -EOVERFLOW;
6014 }
6015 buf[len] = '\0';
6016
Takshak Chahande56fbc242019-07-31 15:10:55 -07006017 for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
Hechao Li6446b312019-06-10 17:56:50 -07006018 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
6019 if (buf[ir] == ',' || buf[ir] == '\0') {
6020 buf[ir] = '\0';
6021 n = sscanf(&buf[il], "%u-%u", &start, &end);
6022 if (n <= 0) {
6023 pr_warning("Failed to get # CPUs from %s\n",
6024 &buf[il]);
6025 return -EINVAL;
6026 } else if (n == 1) {
6027 end = start;
6028 }
Takshak Chahande56fbc242019-07-31 15:10:55 -07006029 tmp_cpus += end - start + 1;
Hechao Li6446b312019-06-10 17:56:50 -07006030 il = ir + 1;
6031 }
6032 }
Takshak Chahande56fbc242019-07-31 15:10:55 -07006033 if (tmp_cpus <= 0) {
6034 pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
Hechao Li6446b312019-06-10 17:56:50 -07006035 return -EINVAL;
6036 }
Takshak Chahande56fbc242019-07-31 15:10:55 -07006037
6038 WRITE_ONCE(cpus, tmp_cpus);
6039 return tmp_cpus;
Hechao Li6446b312019-06-10 17:56:50 -07006040}
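
/*
 * Usage sketch (illustrative, not part of libbpf): sizing the value buffer
 * for a per-CPU map lookup, which needs one slot per possible CPU.
 */
#if 0
static int example_percpu_values(int percpu_map_fd, __u32 key)
{
	int err, n = libbpf_num_possible_cpus();
	__u64 *values;

	if (n < 0)
		return n;
	values = calloc(n, sizeof(*values));
	if (!values)
		return -ENOMEM;
	err = bpf_map_lookup_elem(percpu_map_fd, &key, values);
	free(values);
	return err;
}
#endif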