blob: 857d3d16968ebc6687c536b873f275f97c6fddb9 [file] [log] [blame]
Eric Leblond6061a3d2018-01-30 21:55:03 +01001// SPDX-License-Identifier: LGPL-2.1
2
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan203d1ca2016-07-04 11:02:42 +000010 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation;
14 * version 2.1 of the License (not later!)
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this program; if not, see <http://www.gnu.org/licenses>
Wang Nan1b76c132015-07-01 02:13:51 +000023 */
24
Jakub Kicinski531b0142018-07-10 14:43:05 -070025#define _GNU_SOURCE
Wang Nan1b76c132015-07-01 02:13:51 +000026#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000027#include <stdio.h>
28#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080029#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000030#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000031#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000032#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000033#include <fcntl.h>
34#include <errno.h>
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -070035#include <perf-sys.h>
Wang Nan1b76c132015-07-01 02:13:51 +000036#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080037#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000038#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000039#include <linux/bpf.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000040#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080041#include <linux/limits.h>
42#include <sys/stat.h>
43#include <sys/types.h>
44#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070045#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000046#include <libelf.h>
47#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000048
49#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000050#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070051#include "btf.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000052
/* ELF e_machine value for BPF; define it for older toolchain headers. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* statfs() f_type magic of the BPF pseudo-filesystem (bpffs). */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* Mark a function printf-like so the compiler checks format strings. */
#define __printf(a, b) __attribute__((format(printf, a, b)))
62
/*
 * Default print callback: format the message to stderr and return
 * vfprintf()'s result (characters written, or negative on error).
 */
__attribute__((format(printf, 1, 2)))
static int __base_pr(const char *format, ...)
{
	va_list ap;
	int ret;

	va_start(ap, format);
	ret = vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}
74
/* Per-level print callbacks; warnings/info default to stderr, debug is off. */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Call a print callback only if it is set; prefix every message with "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
88
/*
 * Install user-supplied print callbacks for the three log levels.
 * A NULL callback silences that level (see the __pr() macro, which
 * skips unset callbacks).
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000097
/* Buffer size used for strerror_r() message formatting. */
#define STRERR_BUFSIZE  128

/* Run @action, store its result in @err, and jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
/* Free a pointer's target and NULL the pointer (defends against reuse). */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* Close an fd only if it is valid (>= 0), then reset it to -1. */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-backed ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
126
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name of the program; ".text" for the .text section. */
	char *name;
	/* NOTE(review): presumably netdev ifindex for HW offload — confirm. */
	int prog_ifindex;
	/* Name of the ELF section the program came from (strdup'd). */
	char *section_name;
	/* Private copy of the section's instructions. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One entry per relocation record applying to this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* relocation against a map symbol */
			RELO_CALL,	/* bpf-to-bpf call relocation */
		} type;
		/* Instruction index the relocation patches. */
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: offset within .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/*
	 * fds of loaded instances; nr == -1 means "never loaded"
	 * (see bpf_program__unload()).
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back-pointer to the containing object. */
	struct bpf_object *obj;
	/* Opaque caller data and its destructor. */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
166
/* One map parsed from the object's "maps" ELF section. */
struct bpf_map {
	/* Map fd; initialized to -1 so zclose() skips never-created maps. */
	int fd;
	/* Map symbol name (strdup'd from the ELF string table). */
	char *name;
	/* Symbol offset within the "maps" section; maps are sorted by it. */
	size_t offset;
	/* NOTE(review): presumably netdev ifindex for HW offload — confirm. */
	int map_ifindex;
	struct bpf_map_def def;
	/* BTF type ids describing key and value types. */
	uint32_t btf_key_type_id;
	uint32_t btf_value_type_id;
	/* Opaque caller data and its destructor. */
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
178
/* Global registry of all live bpf_object instances. */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	/* NUL-terminated contents of the "license" ELF section. */
	char license[64];
	/* Kernel version from the "version" ELF section. */
	u32 kern_version;

	/* Programs found in executable sections, and maps from "maps". */
	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		/* Caller-owned in-memory ELF image (see bpf_object__new()). */
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		/* Cached SHT_SYMTAB data and its string table index. */
		Elf_Data *symbols;
		size_t strtabidx;
		/* Collected SHT_REL sections awaiting relocation. */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		/* Section index of "maps" (-1 if absent) and of ".text". */
		int maps_shndx;
		int text_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	/* Parsed BTF from the ".BTF" section, or NULL. */
	struct btf *btf;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	/* Flexible array member: object path stored inline. */
	char path[];
};
/* True while the ELF handle is open (parsing state is usable). */
#define obj_elf_valid(o)	((o)->efile.elf)
228
Wang Nan55cffde2015-07-01 02:14:07 +0000229static void bpf_program__unload(struct bpf_program *prog)
230{
Wang Nanb5805632015-11-16 12:10:09 +0000231 int i;
232
Wang Nan55cffde2015-07-01 02:14:07 +0000233 if (!prog)
234 return;
235
Wang Nanb5805632015-11-16 12:10:09 +0000236 /*
237 * If the object is opened but the program was never loaded,
238 * it is possible that prog->instances.nr == -1.
239 */
240 if (prog->instances.nr > 0) {
241 for (i = 0; i < prog->instances.nr; i++)
242 zclose(prog->instances.fds[i]);
243 } else if (prog->instances.nr != -1) {
244 pr_warning("Internal error: instances.nr is %d\n",
245 prog->instances.nr);
246 }
247
248 prog->instances.nr = -1;
249 zfree(&prog->instances.fds);
Wang Nan55cffde2015-07-01 02:14:07 +0000250}
251
/*
 * Fully tear down a program: run the user's clear_priv destructor
 * first (so it can still inspect the program), unload all instances,
 * then free every allocation owned by *prog and reset its counters.
 * Safe to call on NULL.
 */
static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
273
274static int
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700275bpf_program__init(void *data, size_t size, char *section_name, int idx,
276 struct bpf_program *prog)
Wang Nana5b8bd42015-07-01 02:14:00 +0000277{
278 if (size < sizeof(struct bpf_insn)) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700279 pr_warning("corrupted section '%s'\n", section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000280 return -EINVAL;
281 }
282
283 bzero(prog, sizeof(*prog));
284
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700285 prog->section_name = strdup(section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000286 if (!prog->section_name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100287 pr_warning("failed to alloc name for prog under section(%d) %s\n",
288 idx, section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000289 goto errout;
290 }
291
292 prog->insns = malloc(size);
293 if (!prog->insns) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700294 pr_warning("failed to alloc insns for prog under section %s\n",
295 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000296 goto errout;
297 }
298 prog->insns_cnt = size / sizeof(struct bpf_insn);
299 memcpy(prog->insns, data,
300 prog->insns_cnt * sizeof(struct bpf_insn));
301 prog->idx = idx;
Wang Nanb5805632015-11-16 12:10:09 +0000302 prog->instances.fds = NULL;
303 prog->instances.nr = -1;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000304 prog->type = BPF_PROG_TYPE_KPROBE;
Wang Nana5b8bd42015-07-01 02:14:00 +0000305
306 return 0;
307errout:
308 bpf_program__exit(prog);
309 return -ENOMEM;
310}
311
/*
 * Build a bpf_program from ELF section data and append it to
 * obj->programs.  On allocation failure the existing array stays
 * valid (reallocarray() leaves the original intact on failure).
 *
 * Returns 0 on success or a negative errno.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* Struct copy; ownership of prog's allocations moves to the array. */
	progs[nr_progs] = prog;
	return 0;
}
346
/*
 * Name every program by scanning the symbol table for a STB_GLOBAL
 * symbol defined in the program's section.  A program occupying the
 * .text section with no global symbol falls back to the name ".text".
 *
 * Returns 0 on success, -LIBBPF_ERRNO__LIBELF if a symbol's name
 * string cannot be read, -EINVAL if no name is found, -ENOMEM on
 * allocation failure.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* First global symbol living in prog's section wins. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
399
/*
 * Allocate and minimally initialize a bpf_object for @path and link
 * it into the global bpf_objects_list.  @obj_buf, when non-NULL, is
 * an in-memory ELF image that remains owned by the caller (see the
 * comment below).  Returns the object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* One allocation covers the struct plus the trailing path[]. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;	/* no "maps" section seen yet */

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
431
/*
 * Release all ELF parsing state: the libelf handle, cached symbol
 * data, the relocation array, and the fd.  The caller-owned obj_buf
 * is only forgotten, never freed.  Idempotent: once efile.elf is
 * cleared, obj_elf_valid() is false and later calls return early.
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	/* symbols pointed into libelf-owned data; just drop the reference. */
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
449
/*
 * Open the libelf handle for the object — from the caller-provided
 * in-memory buffer when obj_buf_sz > 0, otherwise by opening
 * obj->path — then validate the ELF header: must be a relocatable
 * file with e_machine EM_BPF or 0 (old LLVM emitted EM_NONE).
 *
 * Returns 0 on success or a negative libbpf/errno code; on failure
 * all ELF state is torn down via bpf_object__elf_finish().
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
508
Wang Nancc4228d2015-07-01 02:13:55 +0000509static int
510bpf_object__check_endianness(struct bpf_object *obj)
511{
512 static unsigned int const endian = 1;
513
514 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
515 case ELFDATA2LSB:
516 /* We are big endian, BPF obj is little endian. */
517 if (*(unsigned char const *)&endian != 1)
518 goto mismatch;
519 break;
520
521 case ELFDATA2MSB:
522 /* We are little endian, BPF obj is big endian. */
523 if (*(unsigned char const *)&endian != 0)
524 goto mismatch;
525 break;
526 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000527 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000528 }
529
530 return 0;
531
532mismatch:
533 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000534 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000535}
536
Wang Nancb1e5e92015-07-01 02:13:57 +0000537static int
538bpf_object__init_license(struct bpf_object *obj,
539 void *data, size_t size)
540{
541 memcpy(obj->license, data,
542 min(size, sizeof(obj->license) - 1));
543 pr_debug("license of %s is %s\n", obj->path, obj->license);
544 return 0;
545}
546
547static int
548bpf_object__init_kversion(struct bpf_object *obj,
549 void *data, size_t size)
550{
551 u32 kver;
552
553 if (size != sizeof(kver)) {
554 pr_warning("invalid kver section in %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000555 return -LIBBPF_ERRNO__FORMAT;
Wang Nancb1e5e92015-07-01 02:13:57 +0000556 }
557 memcpy(&kver, data, sizeof(kver));
558 obj->kern_version = kver;
559 pr_debug("kernel version of %s is %x\n", obj->path,
560 obj->kern_version);
561 return 0;
562}
563
Eric Leblond4708bbd2016-11-15 04:05:47 +0000564static int compare_bpf_map(const void *_a, const void *_b)
565{
566 const struct bpf_map *a = _a;
567 const struct bpf_map *b = _b;
568
569 return a->offset - b->offset;
570}
571
572static int
573bpf_object__init_maps(struct bpf_object *obj)
574{
Craig Gallekb13c5c12017-10-05 10:41:57 -0400575 int i, map_idx, map_def_sz, nr_maps = 0;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000576 Elf_Scn *scn;
577 Elf_Data *data;
578 Elf_Data *symbols = obj->efile.symbols;
579
580 if (obj->efile.maps_shndx < 0)
581 return -EINVAL;
582 if (!symbols)
583 return -EINVAL;
584
585 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
586 if (scn)
587 data = elf_getdata(scn, NULL);
588 if (!scn || !data) {
589 pr_warning("failed to get Elf_Data from map section %d\n",
590 obj->efile.maps_shndx);
591 return -EINVAL;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000592 }
593
Eric Leblond4708bbd2016-11-15 04:05:47 +0000594 /*
595 * Count number of maps. Each map has a name.
596 * Array of maps is not supported: only the first element is
597 * considered.
598 *
599 * TODO: Detect array of map and report error.
600 */
601 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
602 GElf_Sym sym;
603
604 if (!gelf_getsym(symbols, i, &sym))
605 continue;
606 if (sym.st_shndx != obj->efile.maps_shndx)
607 continue;
608 nr_maps++;
609 }
610
611 /* Alloc obj->maps and fill nr_maps. */
612 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
613 nr_maps, data->d_size);
614
615 if (!nr_maps)
616 return 0;
Wang Nan9d759a92015-11-27 08:47:35 +0000617
Craig Gallekb13c5c12017-10-05 10:41:57 -0400618 /* Assume equally sized map definitions */
619 map_def_sz = data->d_size / nr_maps;
620 if (!data->d_size || (data->d_size % nr_maps) != 0) {
621 pr_warning("unable to determine map definition size "
622 "section %s, %d maps in %zd bytes\n",
623 obj->path, nr_maps, data->d_size);
624 return -EINVAL;
625 }
626
Wang Nan9d759a92015-11-27 08:47:35 +0000627 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
628 if (!obj->maps) {
629 pr_warning("alloc maps for object failed\n");
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000630 return -ENOMEM;
631 }
Wang Nan9d759a92015-11-27 08:47:35 +0000632 obj->nr_maps = nr_maps;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000633
Eric Leblond4708bbd2016-11-15 04:05:47 +0000634 /*
635 * fill all fd with -1 so won't close incorrect
636 * fd (fd=0 is stdin) when failure (zclose won't close
637 * negative fd)).
638 */
639 for (i = 0; i < nr_maps; i++)
Wang Nan9d759a92015-11-27 08:47:35 +0000640 obj->maps[i].fd = -1;
641
Eric Leblond4708bbd2016-11-15 04:05:47 +0000642 /*
643 * Fill obj->maps using data in "maps" section.
644 */
645 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +0000646 GElf_Sym sym;
Wang Nan561bbcc2015-11-27 08:47:36 +0000647 const char *map_name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000648 struct bpf_map_def *def;
Wang Nan561bbcc2015-11-27 08:47:36 +0000649
650 if (!gelf_getsym(symbols, i, &sym))
651 continue;
Wang Nan666810e2016-01-25 09:55:49 +0000652 if (sym.st_shndx != obj->efile.maps_shndx)
Wang Nan561bbcc2015-11-27 08:47:36 +0000653 continue;
654
655 map_name = elf_strptr(obj->efile.elf,
Wang Nan77ba9a52015-12-08 02:25:30 +0000656 obj->efile.strtabidx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000657 sym.st_name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000658 obj->maps[map_idx].offset = sym.st_value;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400659 if (sym.st_value + map_def_sz > data->d_size) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000660 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
661 obj->path, map_name);
662 return -EINVAL;
Wang Nan561bbcc2015-11-27 08:47:36 +0000663 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000664
Wang Nan561bbcc2015-11-27 08:47:36 +0000665 obj->maps[map_idx].name = strdup(map_name);
Wang Nan973170e2015-12-08 02:25:29 +0000666 if (!obj->maps[map_idx].name) {
667 pr_warning("failed to alloc map name\n");
668 return -ENOMEM;
669 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000670 pr_debug("map %d is \"%s\"\n", map_idx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000671 obj->maps[map_idx].name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000672 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400673 /*
674 * If the definition of the map in the object file fits in
675 * bpf_map_def, copy it. Any extra fields in our version
676 * of bpf_map_def will default to zero as a result of the
677 * calloc above.
678 */
679 if (map_def_sz <= sizeof(struct bpf_map_def)) {
680 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
681 } else {
682 /*
683 * Here the map structure being read is bigger than what
684 * we expect, truncate if the excess bits are all zero.
685 * If they are not zero, reject this map as
686 * incompatible.
687 */
688 char *b;
689 for (b = ((char *)def) + sizeof(struct bpf_map_def);
690 b < ((char *)def) + map_def_sz; b++) {
691 if (*b != 0) {
692 pr_warning("maps section in %s: \"%s\" "
693 "has unrecognized, non-zero "
694 "options\n",
695 obj->path, map_name);
696 return -EINVAL;
697 }
698 }
699 memcpy(&obj->maps[map_idx].def, def,
700 sizeof(struct bpf_map_def));
701 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000702 map_idx++;
Wang Nan561bbcc2015-11-27 08:47:36 +0000703 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000704
705 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400706 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +0000707}
708
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100709static bool section_have_execinstr(struct bpf_object *obj, int idx)
710{
711 Elf_Scn *scn;
712 GElf_Shdr sh;
713
714 scn = elf_getscn(obj->efile.elf, idx);
715 if (!scn)
716 return false;
717
718 if (gelf_getshdr(scn, &sh) != &sh)
719 return false;
720
721 if (sh.sh_flags & SHF_EXECINSTR)
722 return true;
723
724 return false;
725}
726
Wang Nan29603662015-07-01 02:13:56 +0000727static int bpf_object__elf_collect(struct bpf_object *obj)
728{
729 Elf *elf = obj->efile.elf;
730 GElf_Ehdr *ep = &obj->efile.ehdr;
731 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000732 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000733
734 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
735 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
736 pr_warning("failed to get e_shstrndx from %s\n",
737 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000738 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000739 }
740
741 while ((scn = elf_nextscn(elf, scn)) != NULL) {
742 char *name;
743 GElf_Shdr sh;
744 Elf_Data *data;
745
746 idx++;
747 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100748 pr_warning("failed to get section(%d) header from %s\n",
749 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000750 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000751 goto out;
752 }
753
754 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
755 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100756 pr_warning("failed to get section(%d) name from %s\n",
757 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000758 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000759 goto out;
760 }
761
762 data = elf_getdata(scn, 0);
763 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100764 pr_warning("failed to get section(%d) data from %s(%s)\n",
765 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000766 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000767 goto out;
768 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100769 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
770 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000771 (int)sh.sh_link, (unsigned long)sh.sh_flags,
772 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000773
774 if (strcmp(name, "license") == 0)
775 err = bpf_object__init_license(obj,
776 data->d_buf,
777 data->d_size);
778 else if (strcmp(name, "version") == 0)
779 err = bpf_object__init_kversion(obj,
780 data->d_buf,
781 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000782 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000783 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700784 else if (strcmp(name, BTF_ELF_SEC) == 0) {
785 obj->btf = btf__new(data->d_buf, data->d_size,
786 __pr_debug);
787 if (IS_ERR(obj->btf)) {
788 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
789 BTF_ELF_SEC, PTR_ERR(obj->btf));
790 obj->btf = NULL;
791 }
792 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000793 if (obj->efile.symbols) {
794 pr_warning("bpf: multiple SYMTAB in %s\n",
795 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000796 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000797 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000798 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000799 obj->efile.strtabidx = sh.sh_link;
800 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000801 } else if ((sh.sh_type == SHT_PROGBITS) &&
802 (sh.sh_flags & SHF_EXECINSTR) &&
803 (data->d_size > 0)) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800804 if (strcmp(name, ".text") == 0)
805 obj->efile.text_shndx = idx;
Wang Nana5b8bd42015-07-01 02:14:00 +0000806 err = bpf_object__add_program(obj, data->d_buf,
807 data->d_size, name, idx);
808 if (err) {
Wang Nan6371ca3b2015-11-06 13:49:37 +0000809 char errmsg[STRERR_BUFSIZE];
810
Wang Nana5b8bd42015-07-01 02:14:00 +0000811 strerror_r(-err, errmsg, sizeof(errmsg));
812 pr_warning("failed to alloc program %s (%s): %s",
813 name, obj->path, errmsg);
814 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000815 } else if (sh.sh_type == SHT_REL) {
816 void *reloc = obj->efile.reloc;
817 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100818 int sec = sh.sh_info; /* points to other section */
819
820 /* Only do relo for section with exec instructions */
821 if (!section_have_execinstr(obj, sec)) {
822 pr_debug("skip relo %s(%d) for section(%d)\n",
823 name, idx, sec);
824 continue;
825 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000826
Jakub Kicinski531b0142018-07-10 14:43:05 -0700827 reloc = reallocarray(reloc, nr_reloc,
828 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +0000829 if (!reloc) {
830 pr_warning("realloc failed\n");
831 err = -ENOMEM;
832 } else {
833 int n = nr_reloc - 1;
834
835 obj->efile.reloc = reloc;
836 obj->efile.nr_reloc = nr_reloc;
837
838 obj->efile.reloc[n].shdr = sh;
839 obj->efile.reloc[n].data = data;
840 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100841 } else {
842 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000843 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000844 if (err)
845 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000846 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000847
Wang Nan77ba9a52015-12-08 02:25:30 +0000848 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
849 pr_warning("Corrupted ELF file: index of strtab invalid\n");
850 return LIBBPF_ERRNO__FORMAT;
851 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700852 if (obj->efile.maps_shndx >= 0) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000853 err = bpf_object__init_maps(obj);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700854 if (err)
855 goto out;
856 }
857 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000858out:
859 return err;
860}
861
Wang Nan34090912015-07-01 02:14:02 +0000862static struct bpf_program *
863bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
864{
865 struct bpf_program *prog;
866 size_t i;
867
868 for (i = 0; i < obj->nr_programs; i++) {
869 prog = &obj->programs[i];
870 if (prog->idx == idx)
871 return prog;
872 }
873 return NULL;
874}
875
Jakub Kicinski6d4b1982018-07-26 14:32:19 -0700876struct bpf_program *
877bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
878{
879 struct bpf_program *pos;
880
881 bpf_object__for_each_program(pos, obj) {
882 if (pos->section_name && !strcmp(pos->section_name, title))
883 return pos;
884 }
885 return NULL;
886}
887
Wang Nan34090912015-07-01 02:14:02 +0000888static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800889bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
890 Elf_Data *data, struct bpf_object *obj)
Wang Nan34090912015-07-01 02:14:02 +0000891{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800892 Elf_Data *symbols = obj->efile.symbols;
893 int text_shndx = obj->efile.text_shndx;
894 int maps_shndx = obj->efile.maps_shndx;
895 struct bpf_map *maps = obj->maps;
896 size_t nr_maps = obj->nr_maps;
Wang Nan34090912015-07-01 02:14:02 +0000897 int i, nrels;
898
899 pr_debug("collecting relocating info for: '%s'\n",
900 prog->section_name);
901 nrels = shdr->sh_size / shdr->sh_entsize;
902
903 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
904 if (!prog->reloc_desc) {
905 pr_warning("failed to alloc memory in relocation\n");
906 return -ENOMEM;
907 }
908 prog->nr_reloc = nrels;
909
910 for (i = 0; i < nrels; i++) {
911 GElf_Sym sym;
912 GElf_Rel rel;
913 unsigned int insn_idx;
914 struct bpf_insn *insns = prog->insns;
915 size_t map_idx;
916
917 if (!gelf_getrel(data, i, &rel)) {
918 pr_warning("relocation: failed to get %d reloc\n", i);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000919 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +0000920 }
921
Wang Nan34090912015-07-01 02:14:02 +0000922 if (!gelf_getsym(symbols,
923 GELF_R_SYM(rel.r_info),
924 &sym)) {
925 pr_warning("relocation: symbol %"PRIx64" not found\n",
926 GELF_R_SYM(rel.r_info));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000927 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +0000928 }
David Miller7d9890e2017-12-19 15:53:11 -0500929 pr_debug("relo for %lld value %lld name %d\n",
930 (long long) (rel.r_info >> 32),
931 (long long) sym.st_value, sym.st_name);
Wang Nan34090912015-07-01 02:14:02 +0000932
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800933 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
Wang Nan666810e2016-01-25 09:55:49 +0000934 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
935 prog->section_name, sym.st_shndx);
936 return -LIBBPF_ERRNO__RELOC;
937 }
938
939 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
940 pr_debug("relocation: insn_idx=%u\n", insn_idx);
941
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800942 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
943 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
944 pr_warning("incorrect bpf_call opcode\n");
945 return -LIBBPF_ERRNO__RELOC;
946 }
947 prog->reloc_desc[i].type = RELO_CALL;
948 prog->reloc_desc[i].insn_idx = insn_idx;
949 prog->reloc_desc[i].text_off = sym.st_value;
Jakub Kicinski9a94f272018-06-28 14:41:38 -0700950 obj->has_pseudo_calls = true;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800951 continue;
952 }
953
Wang Nan34090912015-07-01 02:14:02 +0000954 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
955 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
956 insn_idx, insns[insn_idx].code);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000957 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +0000958 }
959
Joe Stringer94e5ade2017-01-22 17:11:22 -0800960 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
961 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
962 if (maps[map_idx].offset == sym.st_value) {
963 pr_debug("relocation: find map %zd (%s) for insn %u\n",
964 map_idx, maps[map_idx].name, insn_idx);
965 break;
966 }
967 }
968
Wang Nan34090912015-07-01 02:14:02 +0000969 if (map_idx >= nr_maps) {
970 pr_warning("bpf relocation: map_idx %d large than %d\n",
971 (int)map_idx, (int)nr_maps - 1);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000972 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +0000973 }
974
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800975 prog->reloc_desc[i].type = RELO_LD64;
Wang Nan34090912015-07-01 02:14:02 +0000976 prog->reloc_desc[i].insn_idx = insn_idx;
977 prog->reloc_desc[i].map_idx = map_idx;
978 }
979 return 0;
980}
981
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700982static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
983{
984 struct bpf_map_def *def = &map->def;
985 const size_t max_name = 256;
986 int64_t key_size, value_size;
987 int32_t key_id, value_id;
988 char name[max_name];
989
990 /* Find key type by name from BTF */
991 if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
992 pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
993 map->name, map->name);
994 return -EINVAL;
995 }
996
997 key_id = btf__find_by_name(btf, name);
998 if (key_id < 0) {
999 pr_debug("map:%s key_type:%s cannot be found in BTF\n",
1000 map->name, name);
1001 return key_id;
1002 }
1003
1004 key_size = btf__resolve_size(btf, key_id);
1005 if (key_size < 0) {
1006 pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
1007 map->name, name);
1008 return key_size;
1009 }
1010
1011 if (def->key_size != key_size) {
Sirio Balmellia1c81812018-05-23 18:17:07 +02001012 pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
1013 map->name, name, (unsigned int)key_size, def->key_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001014 return -EINVAL;
1015 }
1016
1017 /* Find value type from BTF */
1018 if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
1019 pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
1020 map->name, map->name);
1021 return -EINVAL;
1022 }
1023
1024 value_id = btf__find_by_name(btf, name);
1025 if (value_id < 0) {
1026 pr_debug("map:%s value_type:%s cannot be found in BTF\n",
1027 map->name, name);
1028 return value_id;
1029 }
1030
1031 value_size = btf__resolve_size(btf, value_id);
1032 if (value_size < 0) {
1033 pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
1034 map->name, name);
1035 return value_size;
1036 }
1037
1038 if (def->value_size != value_size) {
Sirio Balmellia1c81812018-05-23 18:17:07 +02001039 pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
1040 map->name, name, (unsigned int)value_size, def->value_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001041 return -EINVAL;
1042 }
1043
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001044 map->btf_key_type_id = key_id;
1045 map->btf_value_type_id = value_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001046
1047 return 0;
1048}
1049
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001050int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1051{
1052 struct bpf_map_info info = {};
1053 __u32 len = sizeof(info);
1054 int new_fd, err;
1055 char *new_name;
1056
1057 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1058 if (err)
1059 return err;
1060
1061 new_name = strdup(info.name);
1062 if (!new_name)
1063 return -errno;
1064
1065 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1066 if (new_fd < 0)
1067 goto err_free_new_name;
1068
1069 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1070 if (new_fd < 0)
1071 goto err_close_new_fd;
1072
1073 err = zclose(map->fd);
1074 if (err)
1075 goto err_close_new_fd;
1076 free(map->name);
1077
1078 map->fd = new_fd;
1079 map->name = new_name;
1080 map->def.type = info.type;
1081 map->def.key_size = info.key_size;
1082 map->def.value_size = info.value_size;
1083 map->def.max_entries = info.max_entries;
1084 map->def.map_flags = info.map_flags;
1085 map->btf_key_type_id = info.btf_key_type_id;
1086 map->btf_value_type_id = info.btf_value_type_id;
1087
1088 return 0;
1089
1090err_close_new_fd:
1091 close(new_fd);
1092err_free_new_name:
1093 free(new_name);
1094 return -errno;
1095}
1096
Wang Nan52d33522015-07-01 02:14:04 +00001097static int
1098bpf_object__create_maps(struct bpf_object *obj)
1099{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001100 struct bpf_create_map_attr create_attr = {};
Wang Nan52d33522015-07-01 02:14:04 +00001101 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001102 int err;
Wang Nan52d33522015-07-01 02:14:04 +00001103
Wang Nan9d759a92015-11-27 08:47:35 +00001104 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001105 struct bpf_map *map = &obj->maps[i];
1106 struct bpf_map_def *def = &map->def;
1107 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00001108
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001109 if (map->fd >= 0) {
1110 pr_debug("skip map create (preset) %s: fd=%d\n",
1111 map->name, map->fd);
1112 continue;
1113 }
1114
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001115 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07001116 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001117 create_attr.map_type = def->type;
1118 create_attr.map_flags = def->map_flags;
1119 create_attr.key_size = def->key_size;
1120 create_attr.value_size = def->value_size;
1121 create_attr.max_entries = def->max_entries;
1122 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001123 create_attr.btf_key_type_id = 0;
1124 create_attr.btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001125
1126 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1127 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001128 create_attr.btf_key_type_id = map->btf_key_type_id;
1129 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001130 }
1131
1132 *pfd = bpf_create_map_xattr(&create_attr);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001133 if (*pfd < 0 && create_attr.btf_key_type_id) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001134 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1135 map->name, strerror(errno), errno);
1136 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001137 create_attr.btf_key_type_id = 0;
1138 create_attr.btf_value_type_id = 0;
1139 map->btf_key_type_id = 0;
1140 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001141 *pfd = bpf_create_map_xattr(&create_attr);
1142 }
1143
Wang Nan52d33522015-07-01 02:14:04 +00001144 if (*pfd < 0) {
1145 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00001146
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001147 err = *pfd;
Eric Leblond49bf4b32017-08-20 21:48:14 +02001148 pr_warning("failed to create map (name: '%s'): %s\n",
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001149 map->name,
Wang Nan52d33522015-07-01 02:14:04 +00001150 strerror(errno));
1151 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00001152 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001153 return err;
1154 }
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001155 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00001156 }
1157
Wang Nan52d33522015-07-01 02:14:04 +00001158 return 0;
1159}
1160
/* Resolve a RELO_CALL relocation in @prog (a bpf-to-bpf call into .text).
 *
 * On the first call relocation seen for @prog, the whole .text section's
 * instructions are appended to the program's instruction array, and
 * main_prog_cnt records where the appended .text code begins.  The call
 * instruction's imm is then adjusted to point at its callee within the
 * combined image.
 *
 * Returns 0 on success, negative libbpf error code otherwise.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself never gets call relocations applied this way. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text was not appended yet; do it once. */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		/* reallocarray() checks new_cnt * sizeof(*insn) for overflow. */
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* NOTE(review): assumes insn->imm already encodes the callee's
	 * offset within .text; the adjustment rebases it relative to the
	 * call site in the combined image — confirm against the RELO_CALL
	 * handling in bpf_program__collect_reloc().
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1203
1204static int
Wang Nan9d759a92015-11-27 08:47:35 +00001205bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00001206{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001207 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001208
1209 if (!prog || !prog->reloc_desc)
1210 return 0;
1211
1212 for (i = 0; i < prog->nr_reloc; i++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001213 if (prog->reloc_desc[i].type == RELO_LD64) {
1214 struct bpf_insn *insns = prog->insns;
1215 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001216
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001217 insn_idx = prog->reloc_desc[i].insn_idx;
1218 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001219
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001220 if (insn_idx >= (int)prog->insns_cnt) {
1221 pr_warning("relocation out of range: '%s'\n",
1222 prog->section_name);
1223 return -LIBBPF_ERRNO__RELOC;
1224 }
1225 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1226 insns[insn_idx].imm = obj->maps[map_idx].fd;
1227 } else {
1228 err = bpf_program__reloc_text(prog, obj,
1229 &prog->reloc_desc[i]);
1230 if (err)
1231 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001232 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00001233 }
1234
1235 zfree(&prog->reloc_desc);
1236 prog->nr_reloc = 0;
1237 return 0;
1238}
1239
1240
1241static int
1242bpf_object__relocate(struct bpf_object *obj)
1243{
1244 struct bpf_program *prog;
1245 size_t i;
1246 int err;
1247
1248 for (i = 0; i < obj->nr_programs; i++) {
1249 prog = &obj->programs[i];
1250
Wang Nan9d759a92015-11-27 08:47:35 +00001251 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001252 if (err) {
1253 pr_warning("failed to relocate '%s'\n",
1254 prog->section_name);
1255 return err;
1256 }
1257 }
1258 return 0;
1259}
1260
Wang Nan34090912015-07-01 02:14:02 +00001261static int bpf_object__collect_reloc(struct bpf_object *obj)
1262{
1263 int i, err;
1264
1265 if (!obj_elf_valid(obj)) {
1266 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001267 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001268 }
1269
1270 for (i = 0; i < obj->efile.nr_reloc; i++) {
1271 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1272 Elf_Data *data = obj->efile.reloc[i].data;
1273 int idx = shdr->sh_info;
1274 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00001275
1276 if (shdr->sh_type != SHT_REL) {
1277 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001278 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001279 }
1280
1281 prog = bpf_object__find_prog_by_idx(obj, idx);
1282 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001283 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001284 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001285 }
1286
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001287 err = bpf_program__collect_reloc(prog,
Wang Nan34090912015-07-01 02:14:02 +00001288 shdr, data,
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001289 obj);
Wang Nan34090912015-07-01 02:14:02 +00001290 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00001291 return err;
Wang Nan34090912015-07-01 02:14:02 +00001292 }
1293 return 0;
1294}
1295
/* Load one BPF program into the kernel via bpf_load_program_xattr().
 *
 * On success *pfd receives the new program fd and 0 is returned.  On
 * failure a negative LIBBPF_ERRNO__* code is returned, classified by
 * heuristics: verifier rejection (log output present), program too
 * large, wrong program type (the same insns load as a kprobe), or a
 * kernel-version mismatch.
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* A missing log buffer is not fatal; we only lose diagnostics. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	/* NOTE(review): assumes bpf_load_program_xattr() initializes
	 * log_buf[0] before the kernel writes to it, so a non-empty buffer
	 * really means verifier output — confirm in bpf.c.
	 */
	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* Probe: if the same insns load as a kprobe, the
			 * originally requested prog_type was the problem.
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* Otherwise blame the kernel version (kprobe-style progs). */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1365
/* Load @prog (and all of its instances) into the kernel.
 *
 * Without a preprocessor a program has exactly one instance; with one,
 * the preprocessor is invoked per instance and may rewrite the
 * instructions or skip an instance entirely.  On return the original
 * instruction array is freed regardless of success.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	/* Lazily set up the single-instance fd array for plain programs. */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* A preprocessor requires bpf_program__set_prep()
			 * to have sized the instance array already.
			 */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* Preprocessed program: load each instance separately. */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* The preprocessor may elect to skip this instance. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded (or failed). */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1448
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001449static bool bpf_program__is_function_storage(struct bpf_program *prog,
1450 struct bpf_object *obj)
1451{
1452 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1453}
1454
Wang Nan55cffde2015-07-01 02:14:07 +00001455static int
1456bpf_object__load_progs(struct bpf_object *obj)
1457{
1458 size_t i;
1459 int err;
1460
1461 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001462 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001463 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001464 err = bpf_program__load(&obj->programs[i],
1465 obj->license,
1466 obj->kern_version);
1467 if (err)
1468 return err;
1469 }
1470 return 0;
1471}
1472
/* Return whether programs of @type must supply a kernel version to be
 * accepted by the kernel (historically required for kprobe-style
 * programs).  Both directions are spelled out so the compiler can flag
 * newly added enum values; unknown types conservatively require one.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	/* Networking/cgroup/LWT program types: no kernel version needed. */
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
		return false;
	/* Tracing program types (and anything unknown) require one. */
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		return true;
	}
}
1502
1503static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1504{
1505 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001506 pr_warning("%s doesn't provide kernel version\n",
1507 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001508 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001509 }
1510 return 0;
1511}
1512
/* Core open path shared by file- and buffer-based object loading.
 *
 * Parses the ELF object (from @path, or from @obj_buf/@obj_buf_sz when
 * a buffer is given), collects programs, maps and relocations, then
 * releases the libelf state.  @needs_kver enforces that a kernel
 * version is present (see bpf_prog_type__needs_kver()).
 *
 * Returns the new object, or an ERR_PTR()-encoded negative error.
 * CHECK_ERR() jumps to 'out' with 'err' set when a step fails.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF parsing state is no longer needed once collection is done. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1541
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001542struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001543{
1544 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001545 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001546 return NULL;
1547
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001548 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001549
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001550 return __bpf_object__open(attr->file, NULL, 0,
1551 bpf_prog_type__needs_kver(attr->prog_type));
1552}
1553
1554struct bpf_object *bpf_object__open(const char *path)
1555{
1556 struct bpf_object_open_attr attr = {
1557 .file = path,
1558 .prog_type = BPF_PROG_TYPE_UNSPEC,
1559 };
1560
1561 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00001562}
1563
1564struct bpf_object *bpf_object__open_buffer(void *obj_buf,
Wang Nanacf860a2015-08-27 02:30:55 +00001565 size_t obj_buf_sz,
1566 const char *name)
Wang Nan6c956392015-07-01 02:13:54 +00001567{
Wang Nanacf860a2015-08-27 02:30:55 +00001568 char tmp_name[64];
1569
Wang Nan6c956392015-07-01 02:13:54 +00001570 /* param validation */
1571 if (!obj_buf || obj_buf_sz <= 0)
1572 return NULL;
1573
Wang Nanacf860a2015-08-27 02:30:55 +00001574 if (!name) {
1575 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1576 (unsigned long)obj_buf,
1577 (unsigned long)obj_buf_sz);
1578 tmp_name[sizeof(tmp_name) - 1] = '\0';
1579 name = tmp_name;
1580 }
1581 pr_debug("loading object '%s' from buffer\n",
1582 name);
Wang Nan6c956392015-07-01 02:13:54 +00001583
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001584 return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001585}
1586
Wang Nan52d33522015-07-01 02:14:04 +00001587int bpf_object__unload(struct bpf_object *obj)
1588{
1589 size_t i;
1590
1591 if (!obj)
1592 return -EINVAL;
1593
Wang Nan9d759a92015-11-27 08:47:35 +00001594 for (i = 0; i < obj->nr_maps; i++)
1595 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001596
Wang Nan55cffde2015-07-01 02:14:07 +00001597 for (i = 0; i < obj->nr_programs; i++)
1598 bpf_program__unload(&obj->programs[i]);
1599
Wang Nan52d33522015-07-01 02:14:04 +00001600 return 0;
1601}
1602
/* Load all maps and programs of @obj into the kernel.
 *
 * May only be called once per object.  On any failure everything loaded
 * so far is torn down again via bpf_object__unload(); 'loaded' is set
 * up front so the error path's unload is symmetric.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* CHECK_ERR() jumps to 'out' with 'err' set when a step fails. */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1627
Joe Stringerf3675402017-01-26 13:19:56 -08001628static int check_path(const char *path)
1629{
1630 struct statfs st_fs;
1631 char *dname, *dir;
1632 int err = 0;
1633
1634 if (path == NULL)
1635 return -EINVAL;
1636
1637 dname = strdup(path);
1638 if (dname == NULL)
1639 return -ENOMEM;
1640
1641 dir = dirname(dname);
1642 if (statfs(dir, &st_fs)) {
1643 pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
1644 err = -errno;
1645 }
1646 free(dname);
1647
1648 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1649 pr_warning("specified path %s is not on BPF FS\n", path);
1650 err = -EINVAL;
1651 }
1652
1653 return err;
1654}
1655
1656int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1657 int instance)
1658{
1659 int err;
1660
1661 err = check_path(path);
1662 if (err)
1663 return err;
1664
1665 if (prog == NULL) {
1666 pr_warning("invalid program pointer\n");
1667 return -EINVAL;
1668 }
1669
1670 if (instance < 0 || instance >= prog->instances.nr) {
1671 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1672 instance, prog->section_name, prog->instances.nr);
1673 return -EINVAL;
1674 }
1675
1676 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1677 pr_warning("failed to pin program: %s\n", strerror(errno));
1678 return -errno;
1679 }
1680 pr_debug("pinned program '%s'\n", path);
1681
1682 return 0;
1683}
1684
/* Create @path (mode 0700); an already-existing directory is fine. */
static int make_dir(const char *path)
{
	int err;

	if (mkdir(path, 0700) == 0 || errno == EEXIST)
		return 0;

	/* Capture errno before pr_warning() can disturb it. */
	err = -errno;
	pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
	return err;
}
1696
/*
 * Pin every instance of @prog under directory @path on a BPF
 * filesystem: instance i ends up at "<path>/<i>".  The directory is
 * created if missing.  Returns 0 on success, negative error otherwise.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	/* @path is a directory holding one pin file per instance. */
	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		/* NOTE(review): earlier pins are not undone on failure. */
		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			return err;
	}

	return 0;
}
1737
Joe Stringerb6989f32017-01-26 13:19:57 -08001738int bpf_map__pin(struct bpf_map *map, const char *path)
1739{
1740 int err;
1741
1742 err = check_path(path);
1743 if (err)
1744 return err;
1745
1746 if (map == NULL) {
1747 pr_warning("invalid map pointer\n");
1748 return -EINVAL;
1749 }
1750
1751 if (bpf_obj_pin(map->fd, path)) {
1752 pr_warning("failed to pin map: %s\n", strerror(errno));
1753 return -errno;
1754 }
1755
1756 pr_debug("pinned map '%s'\n", path);
1757 return 0;
1758}
1759
/*
 * Pin all maps and programs of a *loaded* @obj under directory @path:
 * maps at "<path>/<map name>", programs at "<path>/<section name>".
 * NOTE(review): pins created before a failure are not removed.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	/* Maps first, then programs; each gets its own pin file. */
	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_map__pin(map, buf);
		if (err)
			return err;
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->section_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__pin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}
1812
/*
 * Release everything owned by @obj: caller private data, ELF state,
 * loaded programs/maps, BTF, and finally the object itself.
 * Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* Give the owner a chance to tear down its private data first. */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Unlink from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001847
Wang Nan9a208ef2015-07-01 02:14:10 +00001848struct bpf_object *
1849bpf_object__next(struct bpf_object *prev)
1850{
1851 struct bpf_object *next;
1852
1853 if (!prev)
1854 next = list_first_entry(&bpf_objects_list,
1855 struct bpf_object,
1856 list);
1857 else
1858 next = list_next_entry(prev, list);
1859
1860 /* Empty list is noticed here so don't need checking on entry. */
1861 if (&next->list == &bpf_objects_list)
1862 return NULL;
1863
1864 return next;
1865}
1866
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001867const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00001868{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001869 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00001870}
1871
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001872unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00001873{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001874 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00001875}
1876
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001877int bpf_object__btf_fd(const struct bpf_object *obj)
1878{
1879 return obj->btf ? btf__fd(obj->btf) : -1;
1880}
1881
/*
 * Attach caller private data to @obj.  Existing private data is
 * released through its clear_priv callback first; @clear_priv will be
 * invoked from bpf_object__close().
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
1892
1893void *bpf_object__priv(struct bpf_object *obj)
1894{
1895 return obj ? obj->priv : ERR_PTR(-EINVAL);
1896}
1897
/*
 * Internal iterator over @obj's program array, including
 * function-storage programs.  @prev == NULL starts the walk; returns
 * NULL at the end or when @prev does not belong to @obj.
 */
static struct bpf_program *
__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	/* Programs are stored contiguously; step by pointer arithmetic. */
	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}
1919
/*
 * Public program iterator: like __bpf_program__next() but skipping
 * function-storage programs.
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog = __bpf_program__next(prev, obj);

	while (prog && bpf_program__is_function_storage(prog, obj))
		prog = __bpf_program__next(prog, obj);

	return prog;
}
1931
/*
 * Attach caller private data to @prog, releasing any previous private
 * data through its clear_priv callback first.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
1942
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001943void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001944{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001945 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001946}
1947
/*
 * Bind @prog to the netdev with interface index @ifindex — consumed at
 * load time (presumably for device offload; see bpf_prog_load_xattr()).
 */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
1952
/*
 * Return @prog's ELF section name.  With @needs_copy the result is a
 * strdup()ed string owned by the caller; ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}
1968
/* Fd of @prog's default (0th) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1973
1974int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1975 bpf_program_prep_t prep)
1976{
1977 int *instances_fds;
1978
1979 if (nr_instances <= 0 || !prep)
1980 return -EINVAL;
1981
1982 if (prog->instances.nr > 0 || prog->instances.fds) {
1983 pr_warning("Can't set pre-processor after loading\n");
1984 return -EINVAL;
1985 }
1986
1987 instances_fds = malloc(sizeof(int) * nr_instances);
1988 if (!instances_fds) {
1989 pr_warning("alloc memory failed for fds\n");
1990 return -ENOMEM;
1991 }
1992
1993 /* fill all fd with -1 */
1994 memset(instances_fds, -1, sizeof(int) * nr_instances);
1995
1996 prog->instances.nr = nr_instances;
1997 prog->instances.fds = instances_fds;
1998 prog->preprocessor = prep;
1999 return 0;
2000}
2001
/*
 * Return the fd of the @n-th loaded instance of @prog; -EINVAL for a
 * NULL program or out-of-range @n, -ENOENT if that instance was never
 * created.
 */
int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	/* fds are initialized to -1; a negative fd means "not created". */
	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}
Wang Nan9d759a92015-11-27 08:47:35 +00002024
/* Force @prog's type, overriding anything derived from its section name. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2029
Wang Nan5f44e4c82016-07-13 10:44:01 +00002030static bool bpf_program__is_type(struct bpf_program *prog,
2031 enum bpf_prog_type type)
2032{
2033 return prog ? (prog->type == type) : false;
2034}
2035
/*
 * Generate the public bpf_program__set_<NAME>() and
 * bpf_program__is_<NAME>() accessor pair for each program TYPE.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002058
/*
 * Set the attach type passed to the kernel at BPF_PROG_LOAD time
 * (prog_load attr's expected_attach_type).
 */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2064
/* Build one section_names[] entry; len excludes the terminating NUL. */
#define BPF_PROG_SEC_FULL(string, ptype, atype) \
	{ string, sizeof(string) - 1, ptype, atype }

/* Plain entry: program type only, no expected attach type. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)

/* CGROUP_SOCK entry with the given expected attach type. */
#define BPF_S_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)

/* CGROUP_SOCK_ADDR entry with the given expected attach type. */
#define BPF_SA_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)

/*
 * ELF section-name prefix -> (program type, expected attach type)
 * table, consulted by libbpf_prog_type_by_name().  Entries ending in
 * '/' are prefixes (e.g. "kprobe/sys_write" matches "kprobe/").
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
	BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
	BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
	BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2),
	BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
	BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
	BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
	BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
};

/* The helper macros are only needed to build the table above. */
#undef BPF_PROG_SEC
#undef BPF_PROG_SEC_FULL
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002116
/*
 * Resolve an ELF section @name to program/attach types via prefix
 * match against section_names[].  On success fills both out-params and
 * returns 0; returns -EINVAL for NULL or unrecognized names.
 */
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		/* Prefix match, so "kprobe/sys_write" hits "kprobe/". */
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	return -EINVAL;
}
Roman Gushchin583c9002017-12-13 15:18:51 +00002134
/* Guess @prog's program/attach types from its ELF section name. */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2143
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002144int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002145{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002146 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002147}
2148
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002149const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002150{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002151 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002152}
2153
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002154const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002155{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002156 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002157}
2158
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002159uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002160{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002161 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002162}
2163
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002164uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002165{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002166 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002167}
2168
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002169int bpf_map__set_priv(struct bpf_map *map, void *priv,
2170 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002171{
2172 if (!map)
2173 return -EINVAL;
2174
2175 if (map->priv) {
2176 if (map->clear_priv)
2177 map->clear_priv(map, map->priv);
2178 }
2179
2180 map->priv = priv;
2181 map->clear_priv = clear_priv;
2182 return 0;
2183}
2184
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002185void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002186{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002187 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002188}
2189
/*
 * Perf event arrays are not bound to a device; callers (see
 * bpf_prog_load_xattr()) skip setting an offload ifindex on them.
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2194
/* Bind @map to the netdev with interface index @ifindex (for offload). */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2199
/*
 * Iterate @obj's maps: @prev == NULL yields the first map, otherwise
 * the one following @prev.  Returns NULL at the end, or if @prev does
 * not point into @obj's map array.
 */
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	/* [s, e) bounds the contiguous map array. */
	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;
	return &obj->maps[idx];
}
Wang Nan561bbcc2015-11-27 08:47:36 +00002226
2227struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002228bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002229{
2230 struct bpf_map *pos;
2231
2232 bpf_map__for_each(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002233 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002234 return pos;
2235 }
2236 return NULL;
2237}
Wang Nan5a6acad2016-11-26 07:03:27 +00002238
2239struct bpf_map *
2240bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2241{
2242 int i;
2243
2244 for (i = 0; i < obj->nr_maps; i++) {
2245 if (obj->maps[i].offset == offset)
2246 return &obj->maps[i];
2247 }
2248 return ERR_PTR(-ENOENT);
2249}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002250
/* Decode an ERR_PTR-style pointer: its error code, or 0 if valid. */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002257
2258int bpf_prog_load(const char *file, enum bpf_prog_type type,
2259 struct bpf_object **pobj, int *prog_fd)
2260{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002261 struct bpf_prog_load_attr attr;
2262
2263 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2264 attr.file = file;
2265 attr.prog_type = type;
2266 attr.expected_attach_type = 0;
2267
2268 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2269}
2270
/*
 * Open @attr->file, fix up each program's type/attach-type/ifindex,
 * load everything, and return the object plus the fd of the first
 * non-function-storage program.  The object is closed on every error
 * path.  Returns 0 or a negative error code.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {
		.file = attr->file,
		.prog_type = attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				pr_warning("failed to guess program type based on section name %s\n",
					   prog->section_name);
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		/* Remember the first "real" program; its fd is returned. */
		if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
			first_prog = prog;
	}

	/* Device-bound maps inherit the target ifindex for offload. */
	bpf_map__for_each(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002342
2343enum bpf_perf_event_ret
2344bpf_perf_event_read_simple(void *mem, unsigned long size,
2345 unsigned long page_size, void **buf, size_t *buf_len,
2346 bpf_perf_event_print_t fn, void *priv)
2347{
2348 volatile struct perf_event_mmap_page *header = mem;
2349 __u64 data_tail = header->data_tail;
2350 __u64 data_head = header->data_head;
2351 void *base, *begin, *end;
2352 int ret;
2353
2354 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
2355 if (data_head == data_tail)
2356 return LIBBPF_PERF_EVENT_CONT;
2357
2358 base = ((char *)header) + page_size;
2359
2360 begin = base + data_tail % size;
2361 end = base + data_head % size;
2362
2363 while (begin != end) {
2364 struct perf_event_header *ehdr;
2365
2366 ehdr = begin;
2367 if (begin + ehdr->size > base + size) {
2368 long len = base + size - begin;
2369
2370 if (*buf_len < ehdr->size) {
2371 free(*buf);
2372 *buf = malloc(ehdr->size);
2373 if (!*buf) {
2374 ret = LIBBPF_PERF_EVENT_ERROR;
2375 break;
2376 }
2377 *buf_len = ehdr->size;
2378 }
2379
2380 memcpy(*buf, begin, len);
2381 memcpy(*buf + len, base, ehdr->size - len);
2382 ehdr = (void *)*buf;
2383 begin = base + ehdr->size - len;
2384 } else if (begin + ehdr->size == base + size) {
2385 begin = base;
2386 } else {
2387 begin += ehdr->size;
2388 }
2389
2390 ret = fn(ehdr, priv);
2391 if (ret != LIBBPF_PERF_EVENT_CONT)
2392 break;
2393
2394 data_tail += ehdr->size;
2395 }
2396
2397 __sync_synchronize(); /* smp_mb() */
2398 header->data_tail = data_tail;
2399
2400 return ret;
2401}