blob: 176cf55237281cbd49b62f875347af58efab3de4 [file] [log] [blame]
Alexei Starovoitov1bc38b82018-10-05 16:40:00 -07001// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
Eric Leblond6061a3d2018-01-30 21:55:03 +01002
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan1b76c132015-07-01 02:13:51 +000010 */
11
Jakub Kicinski531b0142018-07-10 14:43:05 -070012#define _GNU_SOURCE
Wang Nan1b76c132015-07-01 02:13:51 +000013#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000014#include <stdio.h>
15#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080016#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000017#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000018#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000019#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000020#include <fcntl.h>
21#include <errno.h>
Wang Nan1b76c132015-07-01 02:13:51 +000022#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080023#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000024#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000025#include <linux/bpf.h>
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -070026#include <linux/btf.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000027#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080028#include <linux/limits.h>
Yonghong Song438363c2018-10-09 16:14:47 -070029#include <linux/perf_event.h>
Joe Stringerf3675402017-01-26 13:19:56 -080030#include <sys/stat.h>
31#include <sys/types.h>
32#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070033#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000034#include <libelf.h>
35#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000036
37#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000038#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070039#include "btf.h"
Arnaldo Carvalho de Melo6d419072018-09-14 16:47:14 -030040#include "str_error.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000041
/* Older <elf.h> may predate the official BPF machine type assignment. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* bpffs superblock magic; may be missing from older <linux/magic.h>. */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* Let the compiler type-check printf-style arguments (see __base_pr). */
#define __printf(a, b)	__attribute__((format(printf, a, b)))
51
/*
 * Default print callback: format the message and write it to stderr.
 * Returns the vfprintf() result (characters written, or negative on
 * output error).
 */
__attribute__((format(printf, 1, 2)))
static int __base_pr(const char *format, ...)
{
	va_list ap;
	int ret;

	va_start(ap, format);
	ret = vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}
63
/* Active print callbacks.  Warnings and infos default to stderr via
 * __base_pr(); debug output stays off (NULL) until enabled through
 * libbpf_set_print(). */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Emit a message through @func, if set, prefixed with "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
77
78void libbpf_set_print(libbpf_print_fn_t warn,
79 libbpf_print_fn_t info,
80 libbpf_print_fn_t debug)
81{
82 __pr_warning = warn;
83 __pr_info = info;
84 __pr_debug = debug;
85}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000086
/* Size of scratch buffers passed to libbpf_strerror_r(). */
#define STRERR_BUFSIZE  128

/* Run @action, stash its result in @err, and jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
/* Free *ptr and poison it with NULL so a later zfree() is a no-op. */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* close() an fd only if it is valid (>= 0), then poison it with -1.
 * Evaluates to the close() result (0 when the fd was already -1). */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
115
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;		/* symbol name, or ".text" (see
				 * bpf_object__init_prog_names()) */
	int prog_ifindex;	/* presumably the netdev ifindex for
				 * offload -- TODO confirm with callers */
	char *section_name;	/* strdup'ed ELF section name */
	struct bpf_insn *insns;	/* owned copy of the section's insns */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;	/* defaults to KPROBE */

	/* One descriptor per relocation record applied to this prog. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map load */
			RELO_CALL,	/* bpf-to-bpf pseudo call */
		} type;
		int insn_idx;		/* instruction to patch */
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: offset within .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* FDs of loaded instances; nr == -1 means "never loaded"
	 * (see bpf_program__unload()). */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;	/* back-pointer to the owning object */
	void *priv;		/* user data, released via clear_priv */
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
155
/* One map parsed from the "maps" ELF section. */
struct bpf_map {
	int fd;			/* -1 until the kernel map is created */
	char *name;		/* strdup'ed symbol name */
	size_t offset;		/* symbol offset in the maps section;
				 * used as the sort key */
	int map_ifindex;	/* presumably for device offload --
				 * TODO confirm with callers */
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;		/* user data, released via clear_priv */
	bpf_map_clear_priv_t clear_priv;
};
167
Wang Nan9a208ef2015-07-01 02:14:10 +0000168static LIST_HEAD(bpf_objects_list);
169
struct bpf_object {
	char license[64];	/* stays NUL-terminated: the object is
				 * calloc'ed and bpf_object__init_license()
				 * copies at most sizeof - 1 bytes */
	__u32 kern_version;	/* from the "version" section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image
					 * (see bpf_object__new()) */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SHT_SYMTAB data */
		size_t strtabidx;	/* sh_link of the symtab */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* SHT_REL sections to process */
		int nr_reloc;
		int maps_shndx;		/* "maps" section index, -1 if none */
		int text_shndx;		/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;	/* parsed .BTF, or NULL if absent/invalid */

	void *priv;		/* user data, released via clear_priv */
	bpf_object_clear_priv_t clear_priv;

	char path[];		/* object file path, stored inline */
};
/* True while the ELF handle from bpf_object__elf_init() is open. */
#define obj_elf_valid(o)	((o)->efile.elf)
217
Joe Stringer29cd77f2018-10-02 13:35:39 -0700218void bpf_program__unload(struct bpf_program *prog)
Wang Nan55cffde2015-07-01 02:14:07 +0000219{
Wang Nanb5805632015-11-16 12:10:09 +0000220 int i;
221
Wang Nan55cffde2015-07-01 02:14:07 +0000222 if (!prog)
223 return;
224
Wang Nanb5805632015-11-16 12:10:09 +0000225 /*
226 * If the object is opened but the program was never loaded,
227 * it is possible that prog->instances.nr == -1.
228 */
229 if (prog->instances.nr > 0) {
230 for (i = 0; i < prog->instances.nr; i++)
231 zclose(prog->instances.fds[i]);
232 } else if (prog->instances.nr != -1) {
233 pr_warning("Internal error: instances.nr is %d\n",
234 prog->instances.nr);
235 }
236
237 prog->instances.nr = -1;
238 zfree(&prog->instances.fds);
Wang Nan55cffde2015-07-01 02:14:07 +0000239}
240
Wang Nana5b8bd42015-07-01 02:14:00 +0000241static void bpf_program__exit(struct bpf_program *prog)
242{
243 if (!prog)
244 return;
245
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000246 if (prog->clear_priv)
247 prog->clear_priv(prog, prog->priv);
248
249 prog->priv = NULL;
250 prog->clear_priv = NULL;
251
Wang Nan55cffde2015-07-01 02:14:07 +0000252 bpf_program__unload(prog);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700253 zfree(&prog->name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000254 zfree(&prog->section_name);
255 zfree(&prog->insns);
Wang Nan34090912015-07-01 02:14:02 +0000256 zfree(&prog->reloc_desc);
257
258 prog->nr_reloc = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +0000259 prog->insns_cnt = 0;
260 prog->idx = -1;
261}
262
263static int
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700264bpf_program__init(void *data, size_t size, char *section_name, int idx,
265 struct bpf_program *prog)
Wang Nana5b8bd42015-07-01 02:14:00 +0000266{
267 if (size < sizeof(struct bpf_insn)) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700268 pr_warning("corrupted section '%s'\n", section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000269 return -EINVAL;
270 }
271
272 bzero(prog, sizeof(*prog));
273
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700274 prog->section_name = strdup(section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000275 if (!prog->section_name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100276 pr_warning("failed to alloc name for prog under section(%d) %s\n",
277 idx, section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000278 goto errout;
279 }
280
281 prog->insns = malloc(size);
282 if (!prog->insns) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700283 pr_warning("failed to alloc insns for prog under section %s\n",
284 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000285 goto errout;
286 }
287 prog->insns_cnt = size / sizeof(struct bpf_insn);
288 memcpy(prog->insns, data,
289 prog->insns_cnt * sizeof(struct bpf_insn));
290 prog->idx = idx;
Wang Nanb5805632015-11-16 12:10:09 +0000291 prog->instances.fds = NULL;
292 prog->instances.nr = -1;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000293 prog->type = BPF_PROG_TYPE_KPROBE;
Wang Nana5b8bd42015-07-01 02:14:00 +0000294
295 return 0;
296errout:
297 bpf_program__exit(prog);
298 return -ENOMEM;
299}
300
301static int
302bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700303 char *section_name, int idx)
Wang Nana5b8bd42015-07-01 02:14:00 +0000304{
305 struct bpf_program prog, *progs;
306 int nr_progs, err;
307
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700308 err = bpf_program__init(data, size, section_name, idx, &prog);
Wang Nana5b8bd42015-07-01 02:14:00 +0000309 if (err)
310 return err;
311
312 progs = obj->programs;
313 nr_progs = obj->nr_programs;
314
Jakub Kicinski531b0142018-07-10 14:43:05 -0700315 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
Wang Nana5b8bd42015-07-01 02:14:00 +0000316 if (!progs) {
317 /*
318 * In this case the original obj->programs
319 * is still valid, so don't need special treat for
320 * bpf_close_object().
321 */
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700322 pr_warning("failed to alloc a new program under section '%s'\n",
323 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000324 bpf_program__exit(&prog);
325 return -ENOMEM;
326 }
327
328 pr_debug("found program %s\n", prog.section_name);
329 obj->programs = progs;
330 obj->nr_programs = nr_progs + 1;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000331 prog.obj = obj;
Wang Nana5b8bd42015-07-01 02:14:00 +0000332 progs[nr_progs] = prog;
333 return 0;
334}
335
/*
 * Name every program by scanning the symbol table for the first
 * STB_GLOBAL symbol defined in the program's section.  A program in
 * .text with no global symbol is named ".text" (pseudo-call targets --
 * see has_pseudo_calls / RELO_CALL).  Returns 0 or a negative error.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Stop at the first matching global symbol (!name). */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
388
/*
 * Allocate a bpf_object for @path (stored inline via the flexible
 * array member) and link it into the global bpf_objects_list.
 * @obj_buf/@obj_buf_sz optionally describe a caller-owned in-memory
 * ELF image.  Returns the object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* calloc: all fields start zeroed (license, btf, priv, ...). */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;	/* "no maps section yet" */

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
420
/*
 * Tear down all ELF parsing state: end the libelf session, drop
 * borrowed pointers (symbols point into libelf-owned data), free the
 * relocation array, and close the file descriptor.  Idempotent -- a
 * second call returns early via obj_elf_valid().
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	/* owned by libelf; invalid after elf_end() */
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	/* obj_buf belongs to the caller (see bpf_object__new()) */
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
438
/*
 * Open the ELF image -- either the caller-supplied memory buffer or the
 * file at obj->path -- and validate that it is a relocatable eBPF
 * object.  On failure all partially-acquired state is released via
 * bpf_object__elf_finish().  Returns 0 or a negative libbpf errno.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	/* Calling this twice without elf_finish() is a bug. */
	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE];
			char *cp = libbpf_strerror_r(errno, errmsg,
						     sizeof(errmsg));

			pr_warning("failed to open %s: %s\n", obj->path, cp);
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
500
Wang Nancc4228d2015-07-01 02:13:55 +0000501static int
502bpf_object__check_endianness(struct bpf_object *obj)
503{
504 static unsigned int const endian = 1;
505
506 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
507 case ELFDATA2LSB:
508 /* We are big endian, BPF obj is little endian. */
509 if (*(unsigned char const *)&endian != 1)
510 goto mismatch;
511 break;
512
513 case ELFDATA2MSB:
514 /* We are little endian, BPF obj is big endian. */
515 if (*(unsigned char const *)&endian != 0)
516 goto mismatch;
517 break;
518 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000519 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000520 }
521
522 return 0;
523
524mismatch:
525 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000526 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000527}
528
/*
 * Copy the "license" ELF section into obj->license.  At most
 * sizeof(license) - 1 bytes are copied; the result stays
 * NUL-terminated because the object was calloc'ed in bpf_object__new().
 * Always returns 0.
 */
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
538
539static int
540bpf_object__init_kversion(struct bpf_object *obj,
541 void *data, size_t size)
542{
Yonghong Song438363c2018-10-09 16:14:47 -0700543 __u32 kver;
Wang Nancb1e5e92015-07-01 02:13:57 +0000544
545 if (size != sizeof(kver)) {
546 pr_warning("invalid kver section in %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000547 return -LIBBPF_ERRNO__FORMAT;
Wang Nancb1e5e92015-07-01 02:13:57 +0000548 }
549 memcpy(&kver, data, sizeof(kver));
550 obj->kern_version = kver;
551 pr_debug("kernel version of %s is %x\n", obj->path,
552 obj->kern_version);
553 return 0;
554}
555
Eric Leblond4708bbd2016-11-15 04:05:47 +0000556static int compare_bpf_map(const void *_a, const void *_b)
557{
558 const struct bpf_map *a = _a;
559 const struct bpf_map *b = _b;
560
561 return a->offset - b->offset;
562}
563
564static int
565bpf_object__init_maps(struct bpf_object *obj)
566{
Craig Gallekb13c5c12017-10-05 10:41:57 -0400567 int i, map_idx, map_def_sz, nr_maps = 0;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000568 Elf_Scn *scn;
569 Elf_Data *data;
570 Elf_Data *symbols = obj->efile.symbols;
571
572 if (obj->efile.maps_shndx < 0)
573 return -EINVAL;
574 if (!symbols)
575 return -EINVAL;
576
577 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
578 if (scn)
579 data = elf_getdata(scn, NULL);
580 if (!scn || !data) {
581 pr_warning("failed to get Elf_Data from map section %d\n",
582 obj->efile.maps_shndx);
583 return -EINVAL;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000584 }
585
Eric Leblond4708bbd2016-11-15 04:05:47 +0000586 /*
587 * Count number of maps. Each map has a name.
588 * Array of maps is not supported: only the first element is
589 * considered.
590 *
591 * TODO: Detect array of map and report error.
592 */
593 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
594 GElf_Sym sym;
595
596 if (!gelf_getsym(symbols, i, &sym))
597 continue;
598 if (sym.st_shndx != obj->efile.maps_shndx)
599 continue;
600 nr_maps++;
601 }
602
603 /* Alloc obj->maps and fill nr_maps. */
604 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
605 nr_maps, data->d_size);
606
607 if (!nr_maps)
608 return 0;
Wang Nan9d759a92015-11-27 08:47:35 +0000609
Craig Gallekb13c5c12017-10-05 10:41:57 -0400610 /* Assume equally sized map definitions */
611 map_def_sz = data->d_size / nr_maps;
612 if (!data->d_size || (data->d_size % nr_maps) != 0) {
613 pr_warning("unable to determine map definition size "
614 "section %s, %d maps in %zd bytes\n",
615 obj->path, nr_maps, data->d_size);
616 return -EINVAL;
617 }
618
Wang Nan9d759a92015-11-27 08:47:35 +0000619 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
620 if (!obj->maps) {
621 pr_warning("alloc maps for object failed\n");
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000622 return -ENOMEM;
623 }
Wang Nan9d759a92015-11-27 08:47:35 +0000624 obj->nr_maps = nr_maps;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000625
Eric Leblond4708bbd2016-11-15 04:05:47 +0000626 /*
627 * fill all fd with -1 so won't close incorrect
628 * fd (fd=0 is stdin) when failure (zclose won't close
629 * negative fd)).
630 */
631 for (i = 0; i < nr_maps; i++)
Wang Nan9d759a92015-11-27 08:47:35 +0000632 obj->maps[i].fd = -1;
633
Eric Leblond4708bbd2016-11-15 04:05:47 +0000634 /*
635 * Fill obj->maps using data in "maps" section.
636 */
637 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +0000638 GElf_Sym sym;
Wang Nan561bbcc2015-11-27 08:47:36 +0000639 const char *map_name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000640 struct bpf_map_def *def;
Wang Nan561bbcc2015-11-27 08:47:36 +0000641
642 if (!gelf_getsym(symbols, i, &sym))
643 continue;
Wang Nan666810e2016-01-25 09:55:49 +0000644 if (sym.st_shndx != obj->efile.maps_shndx)
Wang Nan561bbcc2015-11-27 08:47:36 +0000645 continue;
646
647 map_name = elf_strptr(obj->efile.elf,
Wang Nan77ba9a52015-12-08 02:25:30 +0000648 obj->efile.strtabidx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000649 sym.st_name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000650 obj->maps[map_idx].offset = sym.st_value;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400651 if (sym.st_value + map_def_sz > data->d_size) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000652 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
653 obj->path, map_name);
654 return -EINVAL;
Wang Nan561bbcc2015-11-27 08:47:36 +0000655 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000656
Wang Nan561bbcc2015-11-27 08:47:36 +0000657 obj->maps[map_idx].name = strdup(map_name);
Wang Nan973170e2015-12-08 02:25:29 +0000658 if (!obj->maps[map_idx].name) {
659 pr_warning("failed to alloc map name\n");
660 return -ENOMEM;
661 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000662 pr_debug("map %d is \"%s\"\n", map_idx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000663 obj->maps[map_idx].name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000664 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400665 /*
666 * If the definition of the map in the object file fits in
667 * bpf_map_def, copy it. Any extra fields in our version
668 * of bpf_map_def will default to zero as a result of the
669 * calloc above.
670 */
671 if (map_def_sz <= sizeof(struct bpf_map_def)) {
672 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
673 } else {
674 /*
675 * Here the map structure being read is bigger than what
676 * we expect, truncate if the excess bits are all zero.
677 * If they are not zero, reject this map as
678 * incompatible.
679 */
680 char *b;
681 for (b = ((char *)def) + sizeof(struct bpf_map_def);
682 b < ((char *)def) + map_def_sz; b++) {
683 if (*b != 0) {
684 pr_warning("maps section in %s: \"%s\" "
685 "has unrecognized, non-zero "
686 "options\n",
687 obj->path, map_name);
688 return -EINVAL;
689 }
690 }
691 memcpy(&obj->maps[map_idx].def, def,
692 sizeof(struct bpf_map_def));
693 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000694 map_idx++;
Wang Nan561bbcc2015-11-27 08:47:36 +0000695 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000696
697 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400698 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +0000699}
700
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100701static bool section_have_execinstr(struct bpf_object *obj, int idx)
702{
703 Elf_Scn *scn;
704 GElf_Shdr sh;
705
706 scn = elf_getscn(obj->efile.elf, idx);
707 if (!scn)
708 return false;
709
710 if (gelf_getshdr(scn, &sh) != &sh)
711 return false;
712
713 if (sh.sh_flags & SHF_EXECINSTR)
714 return true;
715
716 return false;
717}
718
Wang Nan29603662015-07-01 02:13:56 +0000719static int bpf_object__elf_collect(struct bpf_object *obj)
720{
721 Elf *elf = obj->efile.elf;
722 GElf_Ehdr *ep = &obj->efile.ehdr;
723 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000724 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000725
726 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
727 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
728 pr_warning("failed to get e_shstrndx from %s\n",
729 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000730 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000731 }
732
733 while ((scn = elf_nextscn(elf, scn)) != NULL) {
734 char *name;
735 GElf_Shdr sh;
736 Elf_Data *data;
737
738 idx++;
739 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100740 pr_warning("failed to get section(%d) header from %s\n",
741 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000742 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000743 goto out;
744 }
745
746 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
747 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100748 pr_warning("failed to get section(%d) name from %s\n",
749 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000750 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000751 goto out;
752 }
753
754 data = elf_getdata(scn, 0);
755 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100756 pr_warning("failed to get section(%d) data from %s(%s)\n",
757 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000758 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000759 goto out;
760 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100761 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
762 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000763 (int)sh.sh_link, (unsigned long)sh.sh_flags,
764 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000765
766 if (strcmp(name, "license") == 0)
767 err = bpf_object__init_license(obj,
768 data->d_buf,
769 data->d_size);
770 else if (strcmp(name, "version") == 0)
771 err = bpf_object__init_kversion(obj,
772 data->d_buf,
773 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000774 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000775 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700776 else if (strcmp(name, BTF_ELF_SEC) == 0) {
777 obj->btf = btf__new(data->d_buf, data->d_size,
778 __pr_debug);
779 if (IS_ERR(obj->btf)) {
780 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
781 BTF_ELF_SEC, PTR_ERR(obj->btf));
782 obj->btf = NULL;
783 }
784 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000785 if (obj->efile.symbols) {
786 pr_warning("bpf: multiple SYMTAB in %s\n",
787 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000788 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000789 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000790 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000791 obj->efile.strtabidx = sh.sh_link;
792 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000793 } else if ((sh.sh_type == SHT_PROGBITS) &&
794 (sh.sh_flags & SHF_EXECINSTR) &&
795 (data->d_size > 0)) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800796 if (strcmp(name, ".text") == 0)
797 obj->efile.text_shndx = idx;
Wang Nana5b8bd42015-07-01 02:14:00 +0000798 err = bpf_object__add_program(obj, data->d_buf,
799 data->d_size, name, idx);
800 if (err) {
Wang Nan6371ca3b2015-11-06 13:49:37 +0000801 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700802 char *cp = libbpf_strerror_r(-err, errmsg,
803 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000804
Wang Nana5b8bd42015-07-01 02:14:00 +0000805 pr_warning("failed to alloc program %s (%s): %s",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200806 name, obj->path, cp);
Wang Nana5b8bd42015-07-01 02:14:00 +0000807 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000808 } else if (sh.sh_type == SHT_REL) {
809 void *reloc = obj->efile.reloc;
810 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100811 int sec = sh.sh_info; /* points to other section */
812
813 /* Only do relo for section with exec instructions */
814 if (!section_have_execinstr(obj, sec)) {
815 pr_debug("skip relo %s(%d) for section(%d)\n",
816 name, idx, sec);
817 continue;
818 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000819
Jakub Kicinski531b0142018-07-10 14:43:05 -0700820 reloc = reallocarray(reloc, nr_reloc,
821 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +0000822 if (!reloc) {
823 pr_warning("realloc failed\n");
824 err = -ENOMEM;
825 } else {
826 int n = nr_reloc - 1;
827
828 obj->efile.reloc = reloc;
829 obj->efile.nr_reloc = nr_reloc;
830
831 obj->efile.reloc[n].shdr = sh;
832 obj->efile.reloc[n].data = data;
833 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100834 } else {
835 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000836 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000837 if (err)
838 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000839 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000840
Wang Nan77ba9a52015-12-08 02:25:30 +0000841 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
842 pr_warning("Corrupted ELF file: index of strtab invalid\n");
843 return LIBBPF_ERRNO__FORMAT;
844 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700845 if (obj->efile.maps_shndx >= 0) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000846 err = bpf_object__init_maps(obj);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700847 if (err)
848 goto out;
849 }
850 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000851out:
852 return err;
853}
854
Wang Nan34090912015-07-01 02:14:02 +0000855static struct bpf_program *
856bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
857{
858 struct bpf_program *prog;
859 size_t i;
860
861 for (i = 0; i < obj->nr_programs; i++) {
862 prog = &obj->programs[i];
863 if (prog->idx == idx)
864 return prog;
865 }
866 return NULL;
867}
868
Jakub Kicinski6d4b1982018-07-26 14:32:19 -0700869struct bpf_program *
870bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
871{
872 struct bpf_program *pos;
873
874 bpf_object__for_each_program(pos, obj) {
875 if (pos->section_name && !strcmp(pos->section_name, title))
876 return pos;
877 }
878 return NULL;
879}
880
Wang Nan34090912015-07-01 02:14:02 +0000881static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800882bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
883 Elf_Data *data, struct bpf_object *obj)
Wang Nan34090912015-07-01 02:14:02 +0000884{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800885 Elf_Data *symbols = obj->efile.symbols;
886 int text_shndx = obj->efile.text_shndx;
887 int maps_shndx = obj->efile.maps_shndx;
888 struct bpf_map *maps = obj->maps;
889 size_t nr_maps = obj->nr_maps;
Wang Nan34090912015-07-01 02:14:02 +0000890 int i, nrels;
891
892 pr_debug("collecting relocating info for: '%s'\n",
893 prog->section_name);
894 nrels = shdr->sh_size / shdr->sh_entsize;
895
896 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
897 if (!prog->reloc_desc) {
898 pr_warning("failed to alloc memory in relocation\n");
899 return -ENOMEM;
900 }
901 prog->nr_reloc = nrels;
902
903 for (i = 0; i < nrels; i++) {
904 GElf_Sym sym;
905 GElf_Rel rel;
906 unsigned int insn_idx;
907 struct bpf_insn *insns = prog->insns;
908 size_t map_idx;
909
910 if (!gelf_getrel(data, i, &rel)) {
911 pr_warning("relocation: failed to get %d reloc\n", i);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000912 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +0000913 }
914
Wang Nan34090912015-07-01 02:14:02 +0000915 if (!gelf_getsym(symbols,
916 GELF_R_SYM(rel.r_info),
917 &sym)) {
918 pr_warning("relocation: symbol %"PRIx64" not found\n",
919 GELF_R_SYM(rel.r_info));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000920 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +0000921 }
David Miller7d9890e2017-12-19 15:53:11 -0500922 pr_debug("relo for %lld value %lld name %d\n",
923 (long long) (rel.r_info >> 32),
924 (long long) sym.st_value, sym.st_name);
Wang Nan34090912015-07-01 02:14:02 +0000925
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800926 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
Wang Nan666810e2016-01-25 09:55:49 +0000927 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
928 prog->section_name, sym.st_shndx);
929 return -LIBBPF_ERRNO__RELOC;
930 }
931
932 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
933 pr_debug("relocation: insn_idx=%u\n", insn_idx);
934
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800935 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
936 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
937 pr_warning("incorrect bpf_call opcode\n");
938 return -LIBBPF_ERRNO__RELOC;
939 }
940 prog->reloc_desc[i].type = RELO_CALL;
941 prog->reloc_desc[i].insn_idx = insn_idx;
942 prog->reloc_desc[i].text_off = sym.st_value;
Jakub Kicinski9a94f272018-06-28 14:41:38 -0700943 obj->has_pseudo_calls = true;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800944 continue;
945 }
946
Wang Nan34090912015-07-01 02:14:02 +0000947 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
948 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
949 insn_idx, insns[insn_idx].code);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000950 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +0000951 }
952
Joe Stringer94e5ade2017-01-22 17:11:22 -0800953 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
954 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
955 if (maps[map_idx].offset == sym.st_value) {
956 pr_debug("relocation: find map %zd (%s) for insn %u\n",
957 map_idx, maps[map_idx].name, insn_idx);
958 break;
959 }
960 }
961
Wang Nan34090912015-07-01 02:14:02 +0000962 if (map_idx >= nr_maps) {
963 pr_warning("bpf relocation: map_idx %d large than %d\n",
964 (int)map_idx, (int)nr_maps - 1);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000965 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +0000966 }
967
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800968 prog->reloc_desc[i].type = RELO_LD64;
Wang Nan34090912015-07-01 02:14:02 +0000969 prog->reloc_desc[i].insn_idx = insn_idx;
970 prog->reloc_desc[i].map_idx = map_idx;
971 }
972 return 0;
973}
974
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700975static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
976{
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700977 const struct btf_type *container_type;
978 const struct btf_member *key, *value;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700979 struct bpf_map_def *def = &map->def;
980 const size_t max_name = 256;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700981 char container_name[max_name];
Martin KaFai Lau5b891af2018-07-24 08:40:21 -0700982 __s64 key_size, value_size;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700983 __s32 container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700984
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700985 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
986 max_name) {
987 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700988 map->name, map->name);
989 return -EINVAL;
990 }
991
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700992 container_id = btf__find_by_name(btf, container_name);
993 if (container_id < 0) {
994 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
995 map->name, container_name);
996 return container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700997 }
998
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700999 container_type = btf__type_by_id(btf, container_id);
1000 if (!container_type) {
1001 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1002 map->name, container_id);
1003 return -EINVAL;
1004 }
1005
1006 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1007 BTF_INFO_VLEN(container_type->info) < 2) {
1008 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1009 map->name, container_name);
1010 return -EINVAL;
1011 }
1012
1013 key = (struct btf_member *)(container_type + 1);
1014 value = key + 1;
1015
1016 key_size = btf__resolve_size(btf, key->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001017 if (key_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001018 pr_warning("map:%s invalid BTF key_type_size\n",
1019 map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001020 return key_size;
1021 }
1022
1023 if (def->key_size != key_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001024 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1025 map->name, (__u32)key_size, def->key_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001026 return -EINVAL;
1027 }
1028
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001029 value_size = btf__resolve_size(btf, value->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001030 if (value_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001031 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001032 return value_size;
1033 }
1034
1035 if (def->value_size != value_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001036 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1037 map->name, (__u32)value_size, def->value_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001038 return -EINVAL;
1039 }
1040
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001041 map->btf_key_type_id = key->type;
1042 map->btf_value_type_id = value->type;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001043
1044 return 0;
1045}
1046
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001047int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1048{
1049 struct bpf_map_info info = {};
1050 __u32 len = sizeof(info);
1051 int new_fd, err;
1052 char *new_name;
1053
1054 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1055 if (err)
1056 return err;
1057
1058 new_name = strdup(info.name);
1059 if (!new_name)
1060 return -errno;
1061
1062 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1063 if (new_fd < 0)
1064 goto err_free_new_name;
1065
1066 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1067 if (new_fd < 0)
1068 goto err_close_new_fd;
1069
1070 err = zclose(map->fd);
1071 if (err)
1072 goto err_close_new_fd;
1073 free(map->name);
1074
1075 map->fd = new_fd;
1076 map->name = new_name;
1077 map->def.type = info.type;
1078 map->def.key_size = info.key_size;
1079 map->def.value_size = info.value_size;
1080 map->def.max_entries = info.max_entries;
1081 map->def.map_flags = info.map_flags;
1082 map->btf_key_type_id = info.btf_key_type_id;
1083 map->btf_value_type_id = info.btf_value_type_id;
1084
1085 return 0;
1086
1087err_close_new_fd:
1088 close(new_fd);
1089err_free_new_name:
1090 free(new_name);
1091 return -errno;
1092}
1093
Wang Nan52d33522015-07-01 02:14:04 +00001094static int
1095bpf_object__create_maps(struct bpf_object *obj)
1096{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001097 struct bpf_create_map_attr create_attr = {};
Wang Nan52d33522015-07-01 02:14:04 +00001098 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001099 int err;
Wang Nan52d33522015-07-01 02:14:04 +00001100
Wang Nan9d759a92015-11-27 08:47:35 +00001101 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001102 struct bpf_map *map = &obj->maps[i];
1103 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001104 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001105 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00001106
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001107 if (map->fd >= 0) {
1108 pr_debug("skip map create (preset) %s: fd=%d\n",
1109 map->name, map->fd);
1110 continue;
1111 }
1112
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001113 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07001114 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001115 create_attr.map_type = def->type;
1116 create_attr.map_flags = def->map_flags;
1117 create_attr.key_size = def->key_size;
1118 create_attr.value_size = def->value_size;
1119 create_attr.max_entries = def->max_entries;
1120 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001121 create_attr.btf_key_type_id = 0;
1122 create_attr.btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001123
1124 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1125 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001126 create_attr.btf_key_type_id = map->btf_key_type_id;
1127 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001128 }
1129
1130 *pfd = bpf_create_map_xattr(&create_attr);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001131 if (*pfd < 0 && create_attr.btf_key_type_id) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001132 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001133 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001134 map->name, cp, errno);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001135 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001136 create_attr.btf_key_type_id = 0;
1137 create_attr.btf_value_type_id = 0;
1138 map->btf_key_type_id = 0;
1139 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001140 *pfd = bpf_create_map_xattr(&create_attr);
1141 }
1142
Wang Nan52d33522015-07-01 02:14:04 +00001143 if (*pfd < 0) {
1144 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00001145
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001146 err = *pfd;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001147 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Eric Leblond49bf4b32017-08-20 21:48:14 +02001148 pr_warning("failed to create map (name: '%s'): %s\n",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001149 map->name, cp);
Wang Nan52d33522015-07-01 02:14:04 +00001150 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00001151 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001152 return err;
1153 }
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001154 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00001155 }
1156
Wang Nan52d33522015-07-01 02:14:04 +00001157 return 0;
1158}
1159
Wang Nan8a47a6c2015-07-01 02:14:05 +00001160static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001161bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1162 struct reloc_desc *relo)
1163{
1164 struct bpf_insn *insn, *new_insn;
1165 struct bpf_program *text;
1166 size_t new_cnt;
1167
1168 if (relo->type != RELO_CALL)
1169 return -LIBBPF_ERRNO__RELOC;
1170
1171 if (prog->idx == obj->efile.text_shndx) {
1172 pr_warning("relo in .text insn %d into off %d\n",
1173 relo->insn_idx, relo->text_off);
1174 return -LIBBPF_ERRNO__RELOC;
1175 }
1176
1177 if (prog->main_prog_cnt == 0) {
1178 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1179 if (!text) {
1180 pr_warning("no .text section found yet relo into text exist\n");
1181 return -LIBBPF_ERRNO__RELOC;
1182 }
1183 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07001184 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001185 if (!new_insn) {
1186 pr_warning("oom in prog realloc\n");
1187 return -ENOMEM;
1188 }
1189 memcpy(new_insn + prog->insns_cnt, text->insns,
1190 text->insns_cnt * sizeof(*insn));
1191 prog->insns = new_insn;
1192 prog->main_prog_cnt = prog->insns_cnt;
1193 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00001194 pr_debug("added %zd insn from %s to prog %s\n",
1195 text->insns_cnt, text->section_name,
1196 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001197 }
1198 insn = &prog->insns[relo->insn_idx];
1199 insn->imm += prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001200 return 0;
1201}
1202
1203static int
Wang Nan9d759a92015-11-27 08:47:35 +00001204bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00001205{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001206 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001207
1208 if (!prog || !prog->reloc_desc)
1209 return 0;
1210
1211 for (i = 0; i < prog->nr_reloc; i++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001212 if (prog->reloc_desc[i].type == RELO_LD64) {
1213 struct bpf_insn *insns = prog->insns;
1214 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001215
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001216 insn_idx = prog->reloc_desc[i].insn_idx;
1217 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001218
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001219 if (insn_idx >= (int)prog->insns_cnt) {
1220 pr_warning("relocation out of range: '%s'\n",
1221 prog->section_name);
1222 return -LIBBPF_ERRNO__RELOC;
1223 }
1224 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1225 insns[insn_idx].imm = obj->maps[map_idx].fd;
1226 } else {
1227 err = bpf_program__reloc_text(prog, obj,
1228 &prog->reloc_desc[i]);
1229 if (err)
1230 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001231 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00001232 }
1233
1234 zfree(&prog->reloc_desc);
1235 prog->nr_reloc = 0;
1236 return 0;
1237}
1238
1239
1240static int
1241bpf_object__relocate(struct bpf_object *obj)
1242{
1243 struct bpf_program *prog;
1244 size_t i;
1245 int err;
1246
1247 for (i = 0; i < obj->nr_programs; i++) {
1248 prog = &obj->programs[i];
1249
Wang Nan9d759a92015-11-27 08:47:35 +00001250 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001251 if (err) {
1252 pr_warning("failed to relocate '%s'\n",
1253 prog->section_name);
1254 return err;
1255 }
1256 }
1257 return 0;
1258}
1259
Wang Nan34090912015-07-01 02:14:02 +00001260static int bpf_object__collect_reloc(struct bpf_object *obj)
1261{
1262 int i, err;
1263
1264 if (!obj_elf_valid(obj)) {
1265 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001266 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001267 }
1268
1269 for (i = 0; i < obj->efile.nr_reloc; i++) {
1270 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1271 Elf_Data *data = obj->efile.reloc[i].data;
1272 int idx = shdr->sh_info;
1273 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00001274
1275 if (shdr->sh_type != SHT_REL) {
1276 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001277 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001278 }
1279
1280 prog = bpf_object__find_prog_by_idx(obj, idx);
1281 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001282 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001283 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001284 }
1285
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001286 err = bpf_program__collect_reloc(prog,
Wang Nan34090912015-07-01 02:14:02 +00001287 shdr, data,
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001288 obj);
Wang Nan34090912015-07-01 02:14:02 +00001289 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00001290 return err;
Wang Nan34090912015-07-01 02:14:02 +00001291 }
1292 return 0;
1293}
1294
Wang Nan55cffde2015-07-01 02:14:07 +00001295static int
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001296load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
1297 const char *name, struct bpf_insn *insns, int insns_cnt,
Yonghong Song438363c2018-10-09 16:14:47 -07001298 char *license, __u32 kern_version, int *pfd, int prog_ifindex)
Wang Nan55cffde2015-07-01 02:14:07 +00001299{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001300 struct bpf_load_program_attr load_attr;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001301 char *cp, errmsg[STRERR_BUFSIZE];
Wang Nan55cffde2015-07-01 02:14:07 +00001302 char *log_buf;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001303 int ret;
Wang Nan55cffde2015-07-01 02:14:07 +00001304
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001305 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
1306 load_attr.prog_type = type;
1307 load_attr.expected_attach_type = expected_attach_type;
1308 load_attr.name = name;
1309 load_attr.insns = insns;
1310 load_attr.insns_cnt = insns_cnt;
1311 load_attr.license = license;
1312 load_attr.kern_version = kern_version;
David Beckettf0307a72018-05-16 14:02:49 -07001313 load_attr.prog_ifindex = prog_ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001314
1315 if (!load_attr.insns || !load_attr.insns_cnt)
Wang Nan55cffde2015-07-01 02:14:07 +00001316 return -EINVAL;
1317
1318 log_buf = malloc(BPF_LOG_BUF_SIZE);
1319 if (!log_buf)
1320 pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
1321
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001322 ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);
Wang Nan55cffde2015-07-01 02:14:07 +00001323
1324 if (ret >= 0) {
1325 *pfd = ret;
1326 ret = 0;
1327 goto out;
1328 }
1329
Wang Nan6371ca3b2015-11-06 13:49:37 +00001330 ret = -LIBBPF_ERRNO__LOAD;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001331 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001332 pr_warning("load bpf program failed: %s\n", cp);
Wang Nan55cffde2015-07-01 02:14:07 +00001333
Wang Nan6371ca3b2015-11-06 13:49:37 +00001334 if (log_buf && log_buf[0] != '\0') {
1335 ret = -LIBBPF_ERRNO__VERIFY;
Wang Nan55cffde2015-07-01 02:14:07 +00001336 pr_warning("-- BEGIN DUMP LOG ---\n");
1337 pr_warning("\n%s\n", log_buf);
1338 pr_warning("-- END LOG --\n");
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001339 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
1340 pr_warning("Program too large (%zu insns), at most %d insns\n",
1341 load_attr.insns_cnt, BPF_MAXINSNS);
Wang Nan705fa212016-07-13 10:44:02 +00001342 ret = -LIBBPF_ERRNO__PROG2BIG;
Wang Nan6371ca3b2015-11-06 13:49:37 +00001343 } else {
Wang Nan705fa212016-07-13 10:44:02 +00001344 /* Wrong program type? */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001345 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
Wang Nan705fa212016-07-13 10:44:02 +00001346 int fd;
1347
Andrey Ignatovd7be1432018-03-30 15:08:01 -07001348 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
1349 load_attr.expected_attach_type = 0;
1350 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
Wang Nan705fa212016-07-13 10:44:02 +00001351 if (fd >= 0) {
1352 close(fd);
1353 ret = -LIBBPF_ERRNO__PROGTYPE;
1354 goto out;
1355 }
Wang Nan6371ca3b2015-11-06 13:49:37 +00001356 }
Wang Nan705fa212016-07-13 10:44:02 +00001357
1358 if (log_buf)
1359 ret = -LIBBPF_ERRNO__KVER;
Wang Nan55cffde2015-07-01 02:14:07 +00001360 }
1361
1362out:
1363 free(log_buf);
1364 return ret;
1365}
1366
/*
 * Load one bpf_program into the kernel.
 *
 * A program may have multiple "instances" produced by a preprocessor
 * callback; each instance is a rewritten instruction stream loaded as
 * its own prog fd.  Without a preprocessor a single instance is loaded
 * from prog->insns as-is.
 *
 * On return (success or failure) prog->insns is freed: a program can
 * only be loaded once.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* a preprocessor should have sized instances already */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		/* Default: a single instance whose fd is not yet known. */
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn but proceed: only instance 0 is loaded below */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* Preprocessor path: load each rewritten instance separately. */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* The preprocessor may elect to skip an instance. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* report the fd back through the preprocessor's slot too */
		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are consumed by loading; drop them either way */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1449
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001450static bool bpf_program__is_function_storage(struct bpf_program *prog,
1451 struct bpf_object *obj)
1452{
1453 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1454}
1455
Wang Nan55cffde2015-07-01 02:14:07 +00001456static int
1457bpf_object__load_progs(struct bpf_object *obj)
1458{
1459 size_t i;
1460 int err;
1461
1462 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001463 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001464 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001465 err = bpf_program__load(&obj->programs[i],
1466 obj->license,
1467 obj->kern_version);
1468 if (err)
1469 return err;
1470 }
1471 return 0;
1472}
1473
/*
 * Whether a program type requires the kernel-version ELF section at
 * load time.  Networking-style types do not; tracing-style types —
 * and, via the default arm, any unknown/future type — conservatively
 * do.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	/* networking/cgroup types: kernel version is irrelevant */
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		return false;
	/* tracing types attach to kernel internals; unknown types get
	 * the safe default
	 */
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		return true;
	}
}
1505
1506static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1507{
1508 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001509 pr_warning("%s doesn't provide kernel version\n",
1510 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001511 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001512 }
1513 return 0;
1514}
1515
/*
 * Core open path shared by bpf_object__open{,_xattr,_buffer}().
 *
 * Parses the ELF image (from @path, or from the in-memory @obj_buf of
 * @obj_buf_sz bytes when non-NULL), collects maps, programs and
 * relocations, and validates the result.  @needs_kver requires a
 * kernel-version section to be present (see bpf_object__validate()).
 *
 * Returns a new bpf_object on success, an ERR_PTR()-encoded error
 * otherwise.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR() records a failure in err and jumps to the label —
	 * confirm against the macro's definition earlier in this file.
	 */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is no longer needed once collection succeeded. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1544
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001545struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001546{
1547 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001548 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001549 return NULL;
1550
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001551 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001552
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001553 return __bpf_object__open(attr->file, NULL, 0,
1554 bpf_prog_type__needs_kver(attr->prog_type));
1555}
1556
1557struct bpf_object *bpf_object__open(const char *path)
1558{
1559 struct bpf_object_open_attr attr = {
1560 .file = path,
1561 .prog_type = BPF_PROG_TYPE_UNSPEC,
1562 };
1563
1564 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00001565}
1566
1567struct bpf_object *bpf_object__open_buffer(void *obj_buf,
Wang Nanacf860a2015-08-27 02:30:55 +00001568 size_t obj_buf_sz,
1569 const char *name)
Wang Nan6c956392015-07-01 02:13:54 +00001570{
Wang Nanacf860a2015-08-27 02:30:55 +00001571 char tmp_name[64];
1572
Wang Nan6c956392015-07-01 02:13:54 +00001573 /* param validation */
1574 if (!obj_buf || obj_buf_sz <= 0)
1575 return NULL;
1576
Wang Nanacf860a2015-08-27 02:30:55 +00001577 if (!name) {
1578 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1579 (unsigned long)obj_buf,
1580 (unsigned long)obj_buf_sz);
1581 tmp_name[sizeof(tmp_name) - 1] = '\0';
1582 name = tmp_name;
1583 }
1584 pr_debug("loading object '%s' from buffer\n",
1585 name);
Wang Nan6c956392015-07-01 02:13:54 +00001586
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001587 return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001588}
1589
Wang Nan52d33522015-07-01 02:14:04 +00001590int bpf_object__unload(struct bpf_object *obj)
1591{
1592 size_t i;
1593
1594 if (!obj)
1595 return -EINVAL;
1596
Wang Nan9d759a92015-11-27 08:47:35 +00001597 for (i = 0; i < obj->nr_maps; i++)
1598 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001599
Wang Nan55cffde2015-07-01 02:14:07 +00001600 for (i = 0; i < obj->nr_programs; i++)
1601 bpf_program__unload(&obj->programs[i]);
1602
Wang Nan52d33522015-07-01 02:14:04 +00001603 return 0;
1604}
1605
/*
 * Load all maps and programs of an opened object into the kernel.
 * May only be called once per object; on failure everything loaded so
 * far is rolled back via bpf_object__unload().
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* set before attempting: a failed load still consumes the object */
	obj->loaded = true;

	/* CHECK_ERR() records a failure in err and jumps to the label —
	 * confirm against the macro's definition earlier in this file.
	 */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1630
Joe Stringerf3675402017-01-26 13:19:56 -08001631static int check_path(const char *path)
1632{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001633 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001634 struct statfs st_fs;
1635 char *dname, *dir;
1636 int err = 0;
1637
1638 if (path == NULL)
1639 return -EINVAL;
1640
1641 dname = strdup(path);
1642 if (dname == NULL)
1643 return -ENOMEM;
1644
1645 dir = dirname(dname);
1646 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001647 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001648 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001649 err = -errno;
1650 }
1651 free(dname);
1652
1653 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1654 pr_warning("specified path %s is not on BPF FS\n", path);
1655 err = -EINVAL;
1656 }
1657
1658 return err;
1659}
1660
1661int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1662 int instance)
1663{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001664 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001665 int err;
1666
1667 err = check_path(path);
1668 if (err)
1669 return err;
1670
1671 if (prog == NULL) {
1672 pr_warning("invalid program pointer\n");
1673 return -EINVAL;
1674 }
1675
1676 if (instance < 0 || instance >= prog->instances.nr) {
1677 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1678 instance, prog->section_name, prog->instances.nr);
1679 return -EINVAL;
1680 }
1681
1682 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001683 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001684 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001685 return -errno;
1686 }
1687 pr_debug("pinned program '%s'\n", path);
1688
1689 return 0;
1690}
1691
1692static int make_dir(const char *path)
1693{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001694 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001695 int err = 0;
1696
1697 if (mkdir(path, 0700) && errno != EEXIST)
1698 err = -errno;
1699
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001700 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001701 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001702 pr_warning("failed to mkdir %s: %s\n", path, cp);
1703 }
Joe Stringerf3675402017-01-26 13:19:56 -08001704 return err;
1705}
1706
1707int bpf_program__pin(struct bpf_program *prog, const char *path)
1708{
1709 int i, err;
1710
1711 err = check_path(path);
1712 if (err)
1713 return err;
1714
1715 if (prog == NULL) {
1716 pr_warning("invalid program pointer\n");
1717 return -EINVAL;
1718 }
1719
1720 if (prog->instances.nr <= 0) {
1721 pr_warning("no instances of prog %s to pin\n",
1722 prog->section_name);
1723 return -EINVAL;
1724 }
1725
1726 err = make_dir(path);
1727 if (err)
1728 return err;
1729
1730 for (i = 0; i < prog->instances.nr; i++) {
1731 char buf[PATH_MAX];
1732 int len;
1733
1734 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1735 if (len < 0)
1736 return -EINVAL;
1737 else if (len >= PATH_MAX)
1738 return -ENAMETOOLONG;
1739
1740 err = bpf_program__pin_instance(prog, buf, i);
1741 if (err)
1742 return err;
1743 }
1744
1745 return 0;
1746}
1747
Joe Stringerb6989f32017-01-26 13:19:57 -08001748int bpf_map__pin(struct bpf_map *map, const char *path)
1749{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001750 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08001751 int err;
1752
1753 err = check_path(path);
1754 if (err)
1755 return err;
1756
1757 if (map == NULL) {
1758 pr_warning("invalid map pointer\n");
1759 return -EINVAL;
1760 }
1761
1762 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001763 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001764 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08001765 return -errno;
1766 }
1767
1768 pr_debug("pinned map '%s'\n", path);
1769 return 0;
1770}
1771
Joe Stringerd5148d82017-01-26 13:19:58 -08001772int bpf_object__pin(struct bpf_object *obj, const char *path)
1773{
1774 struct bpf_program *prog;
1775 struct bpf_map *map;
1776 int err;
1777
1778 if (!obj)
1779 return -ENOENT;
1780
1781 if (!obj->loaded) {
1782 pr_warning("object not yet loaded; load it first\n");
1783 return -ENOENT;
1784 }
1785
1786 err = make_dir(path);
1787 if (err)
1788 return err;
1789
1790 bpf_map__for_each(map, obj) {
1791 char buf[PATH_MAX];
1792 int len;
1793
1794 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1795 bpf_map__name(map));
1796 if (len < 0)
1797 return -EINVAL;
1798 else if (len >= PATH_MAX)
1799 return -ENAMETOOLONG;
1800
1801 err = bpf_map__pin(map, buf);
1802 if (err)
1803 return err;
1804 }
1805
1806 bpf_object__for_each_program(prog, obj) {
1807 char buf[PATH_MAX];
1808 int len;
1809
1810 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1811 prog->section_name);
1812 if (len < 0)
1813 return -EINVAL;
1814 else if (len >= PATH_MAX)
1815 return -ENAMETOOLONG;
1816
1817 err = bpf_program__pin(prog, buf);
1818 if (err)
1819 return err;
1820 }
1821
1822 return 0;
1823}
1824
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001825void bpf_object__close(struct bpf_object *obj)
1826{
Wang Nana5b8bd42015-07-01 02:14:00 +00001827 size_t i;
1828
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001829 if (!obj)
1830 return;
1831
Wang Nan10931d22016-11-26 07:03:26 +00001832 if (obj->clear_priv)
1833 obj->clear_priv(obj, obj->priv);
1834
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001835 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00001836 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001837 btf__free(obj->btf);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001838
Wang Nan9d759a92015-11-27 08:47:35 +00001839 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00001840 zfree(&obj->maps[i].name);
Wang Nan9d759a92015-11-27 08:47:35 +00001841 if (obj->maps[i].clear_priv)
1842 obj->maps[i].clear_priv(&obj->maps[i],
1843 obj->maps[i].priv);
1844 obj->maps[i].priv = NULL;
1845 obj->maps[i].clear_priv = NULL;
1846 }
1847 zfree(&obj->maps);
1848 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00001849
1850 if (obj->programs && obj->nr_programs) {
1851 for (i = 0; i < obj->nr_programs; i++)
1852 bpf_program__exit(&obj->programs[i]);
1853 }
1854 zfree(&obj->programs);
1855
Wang Nan9a208ef2015-07-01 02:14:10 +00001856 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001857 free(obj);
1858}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001859
Wang Nan9a208ef2015-07-01 02:14:10 +00001860struct bpf_object *
1861bpf_object__next(struct bpf_object *prev)
1862{
1863 struct bpf_object *next;
1864
1865 if (!prev)
1866 next = list_first_entry(&bpf_objects_list,
1867 struct bpf_object,
1868 list);
1869 else
1870 next = list_next_entry(prev, list);
1871
1872 /* Empty list is noticed here so don't need checking on entry. */
1873 if (&next->list == &bpf_objects_list)
1874 return NULL;
1875
1876 return next;
1877}
1878
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001879const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00001880{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001881 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00001882}
1883
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001884unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00001885{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001886 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00001887}
1888
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001889int bpf_object__btf_fd(const struct bpf_object *obj)
1890{
1891 return obj->btf ? btf__fd(obj->btf) : -1;
1892}
1893
/*
 * Attach caller-private data to @obj.
 *
 * @priv: opaque pointer stored on the object.
 * @clear_priv: optional callback invoked with the old data when it is
 *              replaced here or when the object is closed.
 *
 * Always returns 0.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	/* Let the previous owner release its data before overwriting. */
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
1904
1905void *bpf_object__priv(struct bpf_object *obj)
1906{
1907 return obj ? obj->priv : ERR_PTR(-EINVAL);
1908}
1909
Jakub Kicinskieac7d842018-06-28 14:41:39 -07001910static struct bpf_program *
1911__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001912{
1913 size_t idx;
1914
1915 if (!obj->programs)
1916 return NULL;
1917 /* First handler */
1918 if (prev == NULL)
1919 return &obj->programs[0];
1920
1921 if (prev->obj != obj) {
1922 pr_warning("error: program handler doesn't match object\n");
1923 return NULL;
1924 }
1925
1926 idx = (prev - obj->programs) + 1;
1927 if (idx >= obj->nr_programs)
1928 return NULL;
1929 return &obj->programs[idx];
1930}
1931
/*
 * Public program iterator: like __bpf_program__next() but transparently
 * skips entries that only serve as function storage.
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog;

	for (prog = __bpf_program__next(prev, obj);
	     prog && bpf_program__is_function_storage(prog, obj);
	     prog = __bpf_program__next(prog, obj))
		;

	return prog;
}
1943
/*
 * Attach caller-private data to @prog.
 *
 * @priv: opaque pointer stored on the program.
 * @clear_priv: optional callback invoked with the old data when it is
 *              replaced here.
 *
 * Always returns 0.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	/* Release previously attached data through its own callback. */
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
1954
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001955void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001956{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001957 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001958}
1959
/* Record the target netdev ifindex for @prog (presumably used for HW
 * offload at load time — confirm against the prog-load path). */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
1964
Namhyung Kim715f8db2015-11-03 20:21:05 +09001965const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001966{
1967 const char *title;
1968
1969 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09001970 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001971 title = strdup(title);
1972 if (!title) {
1973 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001974 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001975 }
1976 }
1977
1978 return title;
1979}
1980
/* Return the fd of the first (index 0) instance of @prog. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1985
1986int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1987 bpf_program_prep_t prep)
1988{
1989 int *instances_fds;
1990
1991 if (nr_instances <= 0 || !prep)
1992 return -EINVAL;
1993
1994 if (prog->instances.nr > 0 || prog->instances.fds) {
1995 pr_warning("Can't set pre-processor after loading\n");
1996 return -EINVAL;
1997 }
1998
1999 instances_fds = malloc(sizeof(int) * nr_instances);
2000 if (!instances_fds) {
2001 pr_warning("alloc memory failed for fds\n");
2002 return -ENOMEM;
2003 }
2004
2005 /* fill all fd with -1 */
2006 memset(instances_fds, -1, sizeof(int) * nr_instances);
2007
2008 prog->instances.nr = nr_instances;
2009 prog->instances.fds = instances_fds;
2010 prog->preprocessor = prep;
2011 return 0;
2012}
2013
2014int bpf_program__nth_fd(struct bpf_program *prog, int n)
2015{
2016 int fd;
2017
Jakub Kicinski1e960042018-07-26 14:32:18 -07002018 if (!prog)
2019 return -EINVAL;
2020
Wang Nanb5805632015-11-16 12:10:09 +00002021 if (n >= prog->instances.nr || n < 0) {
2022 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2023 n, prog->section_name, prog->instances.nr);
2024 return -EINVAL;
2025 }
2026
2027 fd = prog->instances.fds[n];
2028 if (fd < 0) {
2029 pr_warning("%dth instance of program '%s' is invalid\n",
2030 n, prog->section_name);
2031 return -ENOENT;
2032 }
2033
2034 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002035}
Wang Nan9d759a92015-11-27 08:47:35 +00002036
/* Record the BPF program type for @prog. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2041
Wang Nan5f44e4c82016-07-13 10:44:01 +00002042static bool bpf_program__is_type(struct bpf_program *prog,
2043 enum bpf_prog_type type)
2044{
2045 return prog ? (prog->type == type) : false;
2046}
2047
/*
 * Generate the public accessor pair for one program type:
 *   bpf_program__set_<NAME>() — set prog's type to TYPE (0 / -EINVAL);
 *   bpf_program__is_<NAME>()  — test whether prog's type is TYPE.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
int bpf_program__set_##NAME(struct bpf_program *prog) \
{ \
	if (!prog) \
		return -EINVAL; \
	bpf_program__set_type(prog, TYPE); \
	return 0; \
} \
 \
bool bpf_program__is_##NAME(struct bpf_program *prog) \
{ \
	return bpf_program__is_type(prog, TYPE); \
} \

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002070
/* Record the expected attach type for @prog. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2076
/*
 * Build one section_names[] row: ELF section-name prefix, its length,
 * program type, expected attach type (for load), and attach type (for
 * attach; -EINVAL when the section name does not identify one).
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Table mapping ELF section-name prefixes to program/attach types;
 * consulted by libbpf_prog_type_by_name() and
 * libbpf_attach_type_by_name() via prefix (strncmp) matching. */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
		      BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
		      BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
		      BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
		      BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
		      BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
		      BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002161
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002162int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2163 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00002164{
2165 int i;
2166
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002167 if (!name)
2168 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00002169
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002170 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2171 if (strncmp(name, section_names[i].sec, section_names[i].len))
2172 continue;
2173 *prog_type = section_names[i].prog_type;
2174 *expected_attach_type = section_names[i].expected_attach_type;
2175 return 0;
2176 }
2177 return -EINVAL;
2178}
Roman Gushchin583c9002017-12-13 15:18:51 +00002179
Andrey Ignatov956b6202018-09-26 15:24:53 -07002180int libbpf_attach_type_by_name(const char *name,
2181 enum bpf_attach_type *attach_type)
2182{
2183 int i;
2184
2185 if (!name)
2186 return -EINVAL;
2187
2188 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2189 if (strncmp(name, section_names[i].sec, section_names[i].len))
2190 continue;
2191 if (section_names[i].attach_type == -EINVAL)
2192 return -EINVAL;
2193 *attach_type = section_names[i].attach_type;
2194 return 0;
2195 }
2196 return -EINVAL;
2197}
2198
/* Guess @prog's program/attach types from its ELF section name.
 * Thin wrapper around libbpf_prog_type_by_name(). */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2207
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002208int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002209{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002210 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002211}
2212
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002213const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002214{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002215 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002216}
2217
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002218const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002219{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002220 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002221}
2222
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002223__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002224{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002225 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002226}
2227
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002228__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002229{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002230 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002231}
2232
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002233int bpf_map__set_priv(struct bpf_map *map, void *priv,
2234 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002235{
2236 if (!map)
2237 return -EINVAL;
2238
2239 if (map->priv) {
2240 if (map->clear_priv)
2241 map->clear_priv(map, map->priv);
2242 }
2243
2244 map->priv = priv;
2245 map->clear_priv = clear_priv;
2246 return 0;
2247}
2248
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002249void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002250{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002251 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002252}
2253
/* True if @map is a perf event array — the one map type callers treat
 * as device-neutral (see bpf_prog_load_xattr(), which skips setting
 * map_ifindex on such maps). */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2258
/* Record the target netdev ifindex for @map (presumably used for HW
 * offload at map-create time — confirm against the map-create path). */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2263
Wang Nan9d759a92015-11-27 08:47:35 +00002264struct bpf_map *
2265bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2266{
2267 size_t idx;
2268 struct bpf_map *s, *e;
2269
2270 if (!obj || !obj->maps)
2271 return NULL;
2272
2273 s = obj->maps;
2274 e = obj->maps + obj->nr_maps;
2275
2276 if (prev == NULL)
2277 return s;
2278
2279 if ((prev < s) || (prev >= e)) {
2280 pr_warning("error in %s: map handler doesn't belong to object\n",
2281 __func__);
2282 return NULL;
2283 }
2284
2285 idx = (prev - obj->maps) + 1;
2286 if (idx >= obj->nr_maps)
2287 return NULL;
2288 return &obj->maps[idx];
2289}
Wang Nan561bbcc2015-11-27 08:47:36 +00002290
2291struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002292bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002293{
2294 struct bpf_map *pos;
2295
2296 bpf_map__for_each(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002297 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002298 return pos;
2299 }
2300 return NULL;
2301}
Wang Nan5a6acad2016-11-26 07:03:27 +00002302
2303struct bpf_map *
2304bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2305{
2306 int i;
2307
2308 for (i = 0; i < obj->nr_maps; i++) {
2309 if (obj->maps[i].offset == offset)
2310 return &obj->maps[i];
2311 }
2312 return ERR_PTR(-ENOENT);
2313}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002314
/* Extract the error code from a pointer-encoded error, 0 otherwise. */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002321
2322int bpf_prog_load(const char *file, enum bpf_prog_type type,
2323 struct bpf_object **pobj, int *prog_fd)
2324{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002325 struct bpf_prog_load_attr attr;
2326
2327 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2328 attr.file = file;
2329 attr.prog_type = type;
2330 attr.expected_attach_type = 0;
2331
2332 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2333}
2334
/*
 * Open the object file named in @attr, resolve each program's type
 * (from @attr or, if unspecified, from its section name), propagate the
 * offload ifindex to programs and non-neutral maps, load everything,
 * and hand back the object plus the fd of the first real program.
 *
 * Returns 0 on success; on any failure the object is closed and a
 * negative error code is returned.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {
		.file = attr->file,
		.prog_type = attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				pr_warning("failed to guess program type based on section name %s\n",
					   prog->section_name);
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		/* Remember the first program; its fd is returned below. */
		if (!first_prog)
			first_prog = prog;
	}

	/* Perf event arrays are device-neutral; everything else follows
	 * the requested offload ifindex. */
	bpf_map__for_each(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002406
/*
 * Drain a perf event mmap ring buffer and pass each record to @fn.
 *
 * @mem: base of the perf mmap region; first @page_size bytes hold the
 *       perf_event_mmap_page control page, the data ring follows.
 * @size: size in bytes of the data ring (must match the mmap'ed ring).
 * @buf/@buf_len: caller-owned scratch buffer, grown on demand to
 *       linearize records that wrap around the end of the ring; the
 *       caller frees *buf eventually.
 * @fn: callback invoked per record; iteration stops when it returns
 *       anything other than LIBBPF_PERF_EVENT_CONT.
 * @priv: opaque cookie forwarded to @fn.
 *
 * Returns the last callback result, LIBBPF_PERF_EVENT_CONT if the ring
 * was empty, or LIBBPF_PERF_EVENT_ERROR on allocation failure.
 * data_tail is advanced past every consumed record so the kernel may
 * reuse that space.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mem, unsigned long size,
			   unsigned long page_size, void **buf, size_t *buf_len,
			   bpf_perf_event_print_t fn, void *priv)
{
	volatile struct perf_event_mmap_page *header = mem;
	__u64 data_tail = header->data_tail;
	__u64 data_head = header->data_head;
	int ret = LIBBPF_PERF_EVENT_ERROR;
	void *base, *begin, *end;

	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
	if (data_head == data_tail)
		return LIBBPF_PERF_EVENT_CONT;

	/* Data ring starts one page after the control page. */
	base = ((char *)header) + page_size;

	begin = base + data_tail % size;
	end = base + data_head % size;

	while (begin != end) {
		struct perf_event_header *ehdr;

		ehdr = begin;
		if (begin + ehdr->size > base + size) {
			/* Record wraps around the end of the ring: copy the
			 * two pieces into *buf so @fn sees it contiguously. */
			long len = base + size - begin;

			if (*buf_len < ehdr->size) {
				free(*buf);
				*buf = malloc(ehdr->size);
				if (!*buf) {
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*buf_len = ehdr->size;
			}

			memcpy(*buf, begin, len);
			memcpy(*buf + len, base, ehdr->size - len);
			ehdr = (void *)*buf;
			begin = base + ehdr->size - len;
		} else if (begin + ehdr->size == base + size) {
			/* Record ends exactly at the ring boundary. */
			begin = base;
		} else {
			begin += ehdr->size;
		}

		ret = fn(ehdr, priv);
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;

		data_tail += ehdr->size;
	}

	/* Publish the new tail only after the records were consumed. */
	__sync_synchronize(); /* smp_mb() */
	header->data_tail = data_tail;

	return ret;
}