blob: 97ce9f214002144519a6531d2880f5e015e5351a [file] [log] [blame]
Alexei Starovoitov1bc38b82018-10-05 16:40:00 -07001// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
Eric Leblond6061a3d2018-01-30 21:55:03 +01002
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan1b76c132015-07-01 02:13:51 +000010 */
11
Jakub Kicinski531b0142018-07-10 14:43:05 -070012#define _GNU_SOURCE
Wang Nan1b76c132015-07-01 02:13:51 +000013#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000014#include <stdio.h>
15#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080016#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000017#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000018#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000019#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000020#include <fcntl.h>
21#include <errno.h>
Wang Nan1b76c132015-07-01 02:13:51 +000022#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080023#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000024#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000025#include <linux/bpf.h>
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -070026#include <linux/btf.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000027#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080028#include <linux/limits.h>
Yonghong Song438363c2018-10-09 16:14:47 -070029#include <linux/perf_event.h>
Daniel Borkmanna64af0e2018-10-19 15:51:03 +020030#include <linux/ring_buffer.h>
Joe Stringerf3675402017-01-26 13:19:56 -080031#include <sys/stat.h>
32#include <sys/types.h>
33#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070034#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000035#include <libelf.h>
36#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000037
38#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000039#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070040#include "btf.h"
Arnaldo Carvalho de Melo6d419072018-09-14 16:47:14 -030041#include "str_error.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000042
Wang Nan9b161372016-07-18 06:01:08 +000043#ifndef EM_BPF
44#define EM_BPF 247
45#endif
46
Joe Stringerf3675402017-01-26 13:19:56 -080047#ifndef BPF_FS_MAGIC
48#define BPF_FS_MAGIC 0xcafe4a11
49#endif
50
Wang Nanb3f59d62015-07-01 02:13:52 +000051#define __printf(a, b) __attribute__((format(printf, a, b)))
52
53__printf(1, 2)
54static int __base_pr(const char *format, ...)
55{
56 va_list args;
57 int err;
58
59 va_start(args, format);
60 err = vfprintf(stderr, format, args);
61 va_end(args);
62 return err;
63}
64
65static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
66static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
67static __printf(1, 2) libbpf_print_fn_t __pr_debug;
68
69#define __pr(func, fmt, ...) \
70do { \
71 if ((func)) \
72 (func)("libbpf: " fmt, ##__VA_ARGS__); \
73} while (0)
74
75#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
76#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
77#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
78
79void libbpf_set_print(libbpf_print_fn_t warn,
80 libbpf_print_fn_t info,
81 libbpf_print_fn_t debug)
82{
83 __pr_warning = warn;
84 __pr_info = info;
85 __pr_debug = debug;
86}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000087
Wang Nan6371ca3b2015-11-06 13:49:37 +000088#define STRERR_BUFSIZE 128
89
Wang Nan6371ca3b2015-11-06 13:49:37 +000090#define CHECK_ERR(action, err, out) do { \
91 err = action; \
92 if (err) \
93 goto out; \
94} while(0)
95
96
Wang Nan1a5e3fb2015-07-01 02:13:53 +000097/* Copied from tools/perf/util/util.h */
98#ifndef zfree
99# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
100#endif
101
102#ifndef zclose
103# define zclose(fd) ({ \
104 int ___err = 0; \
105 if ((fd) >= 0) \
106 ___err = close((fd)); \
107 fd = -1; \
108 ___err; })
109#endif
110
111#ifdef HAVE_LIBELF_MMAP_SUPPORT
112# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
113#else
114# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
115#endif
116
Wang Nana5b8bd42015-07-01 02:14:00 +0000117/*
118 * bpf_prog should be a better name but it has been used in
119 * linux/filter.h.
120 */
121struct bpf_program {
122 /* Index in elf obj file, for relocation use. */
123 int idx;
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700124 char *name;
David Beckettf0307a72018-05-16 14:02:49 -0700125 int prog_ifindex;
Wang Nana5b8bd42015-07-01 02:14:00 +0000126 char *section_name;
127 struct bpf_insn *insns;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800128 size_t insns_cnt, main_prog_cnt;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000129 enum bpf_prog_type type;
Wang Nan34090912015-07-01 02:14:02 +0000130
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800131 struct reloc_desc {
132 enum {
133 RELO_LD64,
134 RELO_CALL,
135 } type;
Wang Nan34090912015-07-01 02:14:02 +0000136 int insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800137 union {
138 int map_idx;
139 int text_off;
140 };
Wang Nan34090912015-07-01 02:14:02 +0000141 } *reloc_desc;
142 int nr_reloc;
Wang Nan55cffde2015-07-01 02:14:07 +0000143
Wang Nanb5805632015-11-16 12:10:09 +0000144 struct {
145 int nr;
146 int *fds;
147 } instances;
148 bpf_program_prep_t preprocessor;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000149
150 struct bpf_object *obj;
151 void *priv;
152 bpf_program_clear_priv_t clear_priv;
Andrey Ignatovd7be1432018-03-30 15:08:01 -0700153
154 enum bpf_attach_type expected_attach_type;
Wang Nana5b8bd42015-07-01 02:14:00 +0000155};
156
struct bpf_map {
	/* Map fd; initialized to -1 (not created) in
	 * bpf_object__init_maps(). */
	int fd;
	/* Map symbol name from the ELF string table (strdup'd). */
	char *name;
	/* Byte offset of the definition inside the "maps" section
	 * (sym.st_value); used to sort maps. */
	size_t offset;
	/* Target netdev ifindex for offload — TODO confirm; set outside
	 * this chunk. */
	int map_ifindex;
	/* Definition copied (possibly truncated) from the object file. */
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	/* Opaque per-map user data and its destructor. */
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
168
Wang Nan9a208ef2015-07-01 02:14:10 +0000169static LIST_HEAD(bpf_objects_list);
170
struct bpf_object {
	/* License string copied from the "license" ELF section;
	 * NUL-terminated (object is calloc'd, at most 63 bytes copied). */
	char license[64];
	/* Kernel version from the "version" ELF section. */
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		/* In-memory object image (bpf_object__open_buffer());
		 * owned by the caller, see bpf_object__new(). */
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		/* SHT_SYMTAB section data, found in elf_collect(). */
		Elf_Data *symbols;
		/* String table index (sh_link of the symtab). */
		size_t strtabidx;
		/* SHT_REL sections collected for later relocation. */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		/* Section indices of "maps" and ".text"; -1/unset when
		 * absent. */
		int maps_shndx;
		int text_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	/* BTF data parsed from the ".BTF" section, or NULL. */
	struct btf *btf;

	/* Opaque per-object user data and its destructor. */
	void *priv;
	bpf_object_clear_priv_t clear_priv;

	/* Path this object was opened from; flexible array member. */
	char path[];
};
217#define obj_elf_valid(o) ((o)->efile.elf)
218
Joe Stringer29cd77f2018-10-02 13:35:39 -0700219void bpf_program__unload(struct bpf_program *prog)
Wang Nan55cffde2015-07-01 02:14:07 +0000220{
Wang Nanb5805632015-11-16 12:10:09 +0000221 int i;
222
Wang Nan55cffde2015-07-01 02:14:07 +0000223 if (!prog)
224 return;
225
Wang Nanb5805632015-11-16 12:10:09 +0000226 /*
227 * If the object is opened but the program was never loaded,
228 * it is possible that prog->instances.nr == -1.
229 */
230 if (prog->instances.nr > 0) {
231 for (i = 0; i < prog->instances.nr; i++)
232 zclose(prog->instances.fds[i]);
233 } else if (prog->instances.nr != -1) {
234 pr_warning("Internal error: instances.nr is %d\n",
235 prog->instances.nr);
236 }
237
238 prog->instances.nr = -1;
239 zfree(&prog->instances.fds);
Wang Nan55cffde2015-07-01 02:14:07 +0000240}
241
Wang Nana5b8bd42015-07-01 02:14:00 +0000242static void bpf_program__exit(struct bpf_program *prog)
243{
244 if (!prog)
245 return;
246
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000247 if (prog->clear_priv)
248 prog->clear_priv(prog, prog->priv);
249
250 prog->priv = NULL;
251 prog->clear_priv = NULL;
252
Wang Nan55cffde2015-07-01 02:14:07 +0000253 bpf_program__unload(prog);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700254 zfree(&prog->name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000255 zfree(&prog->section_name);
256 zfree(&prog->insns);
Wang Nan34090912015-07-01 02:14:02 +0000257 zfree(&prog->reloc_desc);
258
259 prog->nr_reloc = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +0000260 prog->insns_cnt = 0;
261 prog->idx = -1;
262}
263
264static int
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700265bpf_program__init(void *data, size_t size, char *section_name, int idx,
266 struct bpf_program *prog)
Wang Nana5b8bd42015-07-01 02:14:00 +0000267{
268 if (size < sizeof(struct bpf_insn)) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700269 pr_warning("corrupted section '%s'\n", section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000270 return -EINVAL;
271 }
272
273 bzero(prog, sizeof(*prog));
274
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700275 prog->section_name = strdup(section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000276 if (!prog->section_name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100277 pr_warning("failed to alloc name for prog under section(%d) %s\n",
278 idx, section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000279 goto errout;
280 }
281
282 prog->insns = malloc(size);
283 if (!prog->insns) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700284 pr_warning("failed to alloc insns for prog under section %s\n",
285 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000286 goto errout;
287 }
288 prog->insns_cnt = size / sizeof(struct bpf_insn);
289 memcpy(prog->insns, data,
290 prog->insns_cnt * sizeof(struct bpf_insn));
291 prog->idx = idx;
Wang Nanb5805632015-11-16 12:10:09 +0000292 prog->instances.fds = NULL;
293 prog->instances.nr = -1;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000294 prog->type = BPF_PROG_TYPE_KPROBE;
Wang Nana5b8bd42015-07-01 02:14:00 +0000295
296 return 0;
297errout:
298 bpf_program__exit(prog);
299 return -ENOMEM;
300}
301
302static int
303bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700304 char *section_name, int idx)
Wang Nana5b8bd42015-07-01 02:14:00 +0000305{
306 struct bpf_program prog, *progs;
307 int nr_progs, err;
308
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700309 err = bpf_program__init(data, size, section_name, idx, &prog);
Wang Nana5b8bd42015-07-01 02:14:00 +0000310 if (err)
311 return err;
312
313 progs = obj->programs;
314 nr_progs = obj->nr_programs;
315
Jakub Kicinski531b0142018-07-10 14:43:05 -0700316 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
Wang Nana5b8bd42015-07-01 02:14:00 +0000317 if (!progs) {
318 /*
319 * In this case the original obj->programs
320 * is still valid, so don't need special treat for
321 * bpf_close_object().
322 */
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700323 pr_warning("failed to alloc a new program under section '%s'\n",
324 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000325 bpf_program__exit(&prog);
326 return -ENOMEM;
327 }
328
329 pr_debug("found program %s\n", prog.section_name);
330 obj->programs = progs;
331 obj->nr_programs = nr_progs + 1;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000332 prog.obj = obj;
Wang Nana5b8bd42015-07-01 02:14:00 +0000333 progs[nr_progs] = prog;
334 return 0;
335}
336
/*
 * Give every collected program a name by scanning the symbol table
 * for a STB_GLOBAL symbol living in that program's section. Programs
 * in ".text" with no global symbol fall back to the name ".text".
 * Returns 0, or a negative error if a name cannot be resolved or
 * duplicated.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* First matching GLOBAL symbol in the section wins;
		 * the loop stops as soon as name is set. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		/* Subprograms in .text may only have local symbols. */
		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
389
Wang Nan6c956392015-07-01 02:13:54 +0000390static struct bpf_object *bpf_object__new(const char *path,
391 void *obj_buf,
392 size_t obj_buf_sz)
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000393{
394 struct bpf_object *obj;
395
396 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
397 if (!obj) {
398 pr_warning("alloc memory failed for %s\n", path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000399 return ERR_PTR(-ENOMEM);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000400 }
401
402 strcpy(obj->path, path);
403 obj->efile.fd = -1;
Wang Nan6c956392015-07-01 02:13:54 +0000404
405 /*
406 * Caller of this function should also calls
407 * bpf_object__elf_finish() after data collection to return
408 * obj_buf to user. If not, we should duplicate the buffer to
409 * avoid user freeing them before elf finish.
410 */
411 obj->efile.obj_buf = obj_buf;
412 obj->efile.obj_buf_sz = obj_buf_sz;
Wang Nan666810e2016-01-25 09:55:49 +0000413 obj->efile.maps_shndx = -1;
Wang Nan6c956392015-07-01 02:13:54 +0000414
Wang Nan52d33522015-07-01 02:14:04 +0000415 obj->loaded = false;
Wang Nan9a208ef2015-07-01 02:14:10 +0000416
417 INIT_LIST_HEAD(&obj->list);
418 list_add(&obj->list, &bpf_objects_list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000419 return obj;
420}
421
422static void bpf_object__elf_finish(struct bpf_object *obj)
423{
424 if (!obj_elf_valid(obj))
425 return;
426
427 if (obj->efile.elf) {
428 elf_end(obj->efile.elf);
429 obj->efile.elf = NULL;
430 }
Wang Nanbec7d682015-07-01 02:13:59 +0000431 obj->efile.symbols = NULL;
Wang Nanb62f06e2015-07-01 02:14:01 +0000432
433 zfree(&obj->efile.reloc);
434 obj->efile.nr_reloc = 0;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000435 zclose(obj->efile.fd);
Wang Nan6c956392015-07-01 02:13:54 +0000436 obj->efile.obj_buf = NULL;
437 obj->efile.obj_buf_sz = 0;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000438}
439
440static int bpf_object__elf_init(struct bpf_object *obj)
441{
442 int err = 0;
443 GElf_Ehdr *ep;
444
445 if (obj_elf_valid(obj)) {
446 pr_warning("elf init: internal error\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000447 return -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000448 }
449
Wang Nan6c956392015-07-01 02:13:54 +0000450 if (obj->efile.obj_buf_sz > 0) {
451 /*
452 * obj_buf should have been validated by
453 * bpf_object__open_buffer().
454 */
455 obj->efile.elf = elf_memory(obj->efile.obj_buf,
456 obj->efile.obj_buf_sz);
457 } else {
458 obj->efile.fd = open(obj->path, O_RDONLY);
459 if (obj->efile.fd < 0) {
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200460 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700461 char *cp = libbpf_strerror_r(errno, errmsg,
462 sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200463
464 pr_warning("failed to open %s: %s\n", obj->path, cp);
Wang Nan6c956392015-07-01 02:13:54 +0000465 return -errno;
466 }
467
468 obj->efile.elf = elf_begin(obj->efile.fd,
469 LIBBPF_ELF_C_READ_MMAP,
470 NULL);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000471 }
472
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000473 if (!obj->efile.elf) {
474 pr_warning("failed to open %s as ELF file\n",
475 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000476 err = -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000477 goto errout;
478 }
479
480 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
481 pr_warning("failed to get EHDR from %s\n",
482 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000483 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000484 goto errout;
485 }
486 ep = &obj->efile.ehdr;
487
Wang Nan9b161372016-07-18 06:01:08 +0000488 /* Old LLVM set e_machine to EM_NONE */
489 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000490 pr_warning("%s is not an eBPF object file\n",
491 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000492 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000493 goto errout;
494 }
495
496 return 0;
497errout:
498 bpf_object__elf_finish(obj);
499 return err;
500}
501
Wang Nancc4228d2015-07-01 02:13:55 +0000502static int
503bpf_object__check_endianness(struct bpf_object *obj)
504{
505 static unsigned int const endian = 1;
506
507 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
508 case ELFDATA2LSB:
509 /* We are big endian, BPF obj is little endian. */
510 if (*(unsigned char const *)&endian != 1)
511 goto mismatch;
512 break;
513
514 case ELFDATA2MSB:
515 /* We are little endian, BPF obj is big endian. */
516 if (*(unsigned char const *)&endian != 0)
517 goto mismatch;
518 break;
519 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000520 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000521 }
522
523 return 0;
524
525mismatch:
526 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000527 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000528}
529
Wang Nancb1e5e92015-07-01 02:13:57 +0000530static int
531bpf_object__init_license(struct bpf_object *obj,
532 void *data, size_t size)
533{
534 memcpy(obj->license, data,
535 min(size, sizeof(obj->license) - 1));
536 pr_debug("license of %s is %s\n", obj->path, obj->license);
537 return 0;
538}
539
540static int
541bpf_object__init_kversion(struct bpf_object *obj,
542 void *data, size_t size)
543{
Yonghong Song438363c2018-10-09 16:14:47 -0700544 __u32 kver;
Wang Nancb1e5e92015-07-01 02:13:57 +0000545
546 if (size != sizeof(kver)) {
547 pr_warning("invalid kver section in %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000548 return -LIBBPF_ERRNO__FORMAT;
Wang Nancb1e5e92015-07-01 02:13:57 +0000549 }
550 memcpy(&kver, data, sizeof(kver));
551 obj->kern_version = kver;
552 pr_debug("kernel version of %s is %x\n", obj->path,
553 obj->kern_version);
554 return 0;
555}
556
Eric Leblond4708bbd2016-11-15 04:05:47 +0000557static int compare_bpf_map(const void *_a, const void *_b)
558{
559 const struct bpf_map *a = _a;
560 const struct bpf_map *b = _b;
561
562 return a->offset - b->offset;
563}
564
/*
 * Parse the "maps" ELF section into obj->maps.
 *
 * Two passes over the symbol table: first count symbols living in the
 * maps section (one per map), then fill in name/offset/definition for
 * each. All definitions are assumed equally sized; a definition larger
 * than our struct bpf_map_def is accepted only if the excess bytes are
 * zero (or @flags has MAPS_RELAX_COMPAT), smaller ones are
 * zero-extended by the calloc below.
 *
 * Returns 0 (including when there are no maps) or a negative error.
 * NOTE(review): error paths after obj->maps is allocated leave
 * partially-filled maps behind; presumably the caller's teardown frees
 * them — confirm against bpf_object__close().
 */
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	/*
	 * fill all fd with -1 so won't close incorrect
	 * fd (fd=0 is stdin) when failure (zclose won't close
	 * negative fd)).
	 */
	for (i = 0; i < nr_maps; i++)
		obj->maps[i].fd = -1;

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		/* Definition must fit within the section data. */
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	/* Keep maps ordered by their offset in the section. */
	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}
703
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100704static bool section_have_execinstr(struct bpf_object *obj, int idx)
705{
706 Elf_Scn *scn;
707 GElf_Shdr sh;
708
709 scn = elf_getscn(obj->efile.elf, idx);
710 if (!scn)
711 return false;
712
713 if (gelf_getshdr(scn, &sh) != &sh)
714 return false;
715
716 if (sh.sh_flags & SHF_EXECINSTR)
717 return true;
718
719 return false;
720}
721
John Fastabendc034a172018-10-15 11:19:55 -0700722static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
Wang Nan29603662015-07-01 02:13:56 +0000723{
724 Elf *elf = obj->efile.elf;
725 GElf_Ehdr *ep = &obj->efile.ehdr;
726 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000727 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000728
729 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
730 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
731 pr_warning("failed to get e_shstrndx from %s\n",
732 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000733 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000734 }
735
736 while ((scn = elf_nextscn(elf, scn)) != NULL) {
737 char *name;
738 GElf_Shdr sh;
739 Elf_Data *data;
740
741 idx++;
742 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100743 pr_warning("failed to get section(%d) header from %s\n",
744 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000745 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000746 goto out;
747 }
748
749 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
750 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100751 pr_warning("failed to get section(%d) name from %s\n",
752 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000753 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000754 goto out;
755 }
756
757 data = elf_getdata(scn, 0);
758 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100759 pr_warning("failed to get section(%d) data from %s(%s)\n",
760 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000761 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000762 goto out;
763 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100764 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
765 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000766 (int)sh.sh_link, (unsigned long)sh.sh_flags,
767 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000768
769 if (strcmp(name, "license") == 0)
770 err = bpf_object__init_license(obj,
771 data->d_buf,
772 data->d_size);
773 else if (strcmp(name, "version") == 0)
774 err = bpf_object__init_kversion(obj,
775 data->d_buf,
776 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000777 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000778 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700779 else if (strcmp(name, BTF_ELF_SEC) == 0) {
780 obj->btf = btf__new(data->d_buf, data->d_size,
781 __pr_debug);
782 if (IS_ERR(obj->btf)) {
783 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
784 BTF_ELF_SEC, PTR_ERR(obj->btf));
785 obj->btf = NULL;
786 }
787 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000788 if (obj->efile.symbols) {
789 pr_warning("bpf: multiple SYMTAB in %s\n",
790 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000791 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000792 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000793 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000794 obj->efile.strtabidx = sh.sh_link;
795 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000796 } else if ((sh.sh_type == SHT_PROGBITS) &&
797 (sh.sh_flags & SHF_EXECINSTR) &&
798 (data->d_size > 0)) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800799 if (strcmp(name, ".text") == 0)
800 obj->efile.text_shndx = idx;
Wang Nana5b8bd42015-07-01 02:14:00 +0000801 err = bpf_object__add_program(obj, data->d_buf,
802 data->d_size, name, idx);
803 if (err) {
Wang Nan6371ca3b2015-11-06 13:49:37 +0000804 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700805 char *cp = libbpf_strerror_r(-err, errmsg,
806 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000807
Wang Nana5b8bd42015-07-01 02:14:00 +0000808 pr_warning("failed to alloc program %s (%s): %s",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200809 name, obj->path, cp);
Wang Nana5b8bd42015-07-01 02:14:00 +0000810 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000811 } else if (sh.sh_type == SHT_REL) {
812 void *reloc = obj->efile.reloc;
813 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100814 int sec = sh.sh_info; /* points to other section */
815
816 /* Only do relo for section with exec instructions */
817 if (!section_have_execinstr(obj, sec)) {
818 pr_debug("skip relo %s(%d) for section(%d)\n",
819 name, idx, sec);
820 continue;
821 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000822
Jakub Kicinski531b0142018-07-10 14:43:05 -0700823 reloc = reallocarray(reloc, nr_reloc,
824 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +0000825 if (!reloc) {
826 pr_warning("realloc failed\n");
827 err = -ENOMEM;
828 } else {
829 int n = nr_reloc - 1;
830
831 obj->efile.reloc = reloc;
832 obj->efile.nr_reloc = nr_reloc;
833
834 obj->efile.reloc[n].shdr = sh;
835 obj->efile.reloc[n].data = data;
836 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100837 } else {
838 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000839 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000840 if (err)
841 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000842 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000843
Wang Nan77ba9a52015-12-08 02:25:30 +0000844 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
845 pr_warning("Corrupted ELF file: index of strtab invalid\n");
846 return LIBBPF_ERRNO__FORMAT;
847 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700848 if (obj->efile.maps_shndx >= 0) {
John Fastabendc034a172018-10-15 11:19:55 -0700849 err = bpf_object__init_maps(obj, flags);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700850 if (err)
851 goto out;
852 }
853 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000854out:
855 return err;
856}
857
Wang Nan34090912015-07-01 02:14:02 +0000858static struct bpf_program *
859bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
860{
861 struct bpf_program *prog;
862 size_t i;
863
864 for (i = 0; i < obj->nr_programs; i++) {
865 prog = &obj->programs[i];
866 if (prog->idx == idx)
867 return prog;
868 }
869 return NULL;
870}
871
Jakub Kicinski6d4b1982018-07-26 14:32:19 -0700872struct bpf_program *
873bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
874{
875 struct bpf_program *pos;
876
877 bpf_object__for_each_program(pos, obj) {
878 if (pos->section_name && !strcmp(pos->section_name, title))
879 return pos;
880 }
881 return NULL;
882}
883
/*
 * Parse one SHT_REL section that applies to @prog and record each
 * relocation in prog->reloc_desc[].
 *
 * Two relocation kinds are recognized:
 *  - RELO_CALL: a BPF_JMP|BPF_CALL insn with src_reg == BPF_PSEUDO_CALL,
 *    i.e. a bpf-to-bpf call into the .text section;
 *  - RELO_LD64: a BPF_LD|BPF_IMM|BPF_DW insn referencing a map, matched
 *    by the symbol's st_value against each map's section offset.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * LIBBPF_ERRNO__* code for malformed ELF data.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	/* One fixed-size entry per relocation record. */
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Resolve the symbol the relocation refers to. */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only references into the maps section or .text are valid. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* r_offset is a byte offset; convert to an insn index. */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			/* bpf-to-bpf call: record offset into .text. */
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* Map references must be 64-bit immediate loads. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
977
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700978static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
979{
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700980 const struct btf_type *container_type;
981 const struct btf_member *key, *value;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700982 struct bpf_map_def *def = &map->def;
983 const size_t max_name = 256;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700984 char container_name[max_name];
Martin KaFai Lau5b891af2018-07-24 08:40:21 -0700985 __s64 key_size, value_size;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700986 __s32 container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700987
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700988 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
989 max_name) {
990 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700991 map->name, map->name);
992 return -EINVAL;
993 }
994
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -0700995 container_id = btf__find_by_name(btf, container_name);
996 if (container_id < 0) {
997 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
998 map->name, container_name);
999 return container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001000 }
1001
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001002 container_type = btf__type_by_id(btf, container_id);
1003 if (!container_type) {
1004 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1005 map->name, container_id);
1006 return -EINVAL;
1007 }
1008
1009 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1010 BTF_INFO_VLEN(container_type->info) < 2) {
1011 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1012 map->name, container_name);
1013 return -EINVAL;
1014 }
1015
1016 key = (struct btf_member *)(container_type + 1);
1017 value = key + 1;
1018
1019 key_size = btf__resolve_size(btf, key->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001020 if (key_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001021 pr_warning("map:%s invalid BTF key_type_size\n",
1022 map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001023 return key_size;
1024 }
1025
1026 if (def->key_size != key_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001027 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1028 map->name, (__u32)key_size, def->key_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001029 return -EINVAL;
1030 }
1031
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001032 value_size = btf__resolve_size(btf, value->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001033 if (value_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001034 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001035 return value_size;
1036 }
1037
1038 if (def->value_size != value_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001039 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1040 map->name, (__u32)value_size, def->value_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001041 return -EINVAL;
1042 }
1043
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001044 map->btf_key_type_id = key->type;
1045 map->btf_value_type_id = value->type;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001046
1047 return 0;
1048}
1049
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001050int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1051{
1052 struct bpf_map_info info = {};
1053 __u32 len = sizeof(info);
1054 int new_fd, err;
1055 char *new_name;
1056
1057 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1058 if (err)
1059 return err;
1060
1061 new_name = strdup(info.name);
1062 if (!new_name)
1063 return -errno;
1064
1065 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1066 if (new_fd < 0)
1067 goto err_free_new_name;
1068
1069 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1070 if (new_fd < 0)
1071 goto err_close_new_fd;
1072
1073 err = zclose(map->fd);
1074 if (err)
1075 goto err_close_new_fd;
1076 free(map->name);
1077
1078 map->fd = new_fd;
1079 map->name = new_name;
1080 map->def.type = info.type;
1081 map->def.key_size = info.key_size;
1082 map->def.value_size = info.value_size;
1083 map->def.max_entries = info.max_entries;
1084 map->def.map_flags = info.map_flags;
1085 map->btf_key_type_id = info.btf_key_type_id;
1086 map->btf_value_type_id = info.btf_value_type_id;
1087
1088 return 0;
1089
1090err_close_new_fd:
1091 close(new_fd);
1092err_free_new_name:
1093 free(new_name);
1094 return -errno;
1095}
1096
/*
 * Create a kernel map for every map in @obj that does not already have
 * an fd (fds may be preset via bpf_map__reuse_fd()).
 *
 * When the object carries BTF and key/value type info resolves, the map
 * is first created with BTF type ids attached; if the kernel rejects
 * that, creation is retried without BTF.  On any final failure, all maps
 * created so far are closed and the (negative) error is returned.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		/* Already has an fd (reused/pinned map) — nothing to do. */
		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		/* Reset BTF fields; create_attr is reused across iterations. */
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		/* Kernel may lack BTF-for-maps support: retry without it. */
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, cp, errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name, cp);
			/* Unwind: close every map created before this one. */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
1162
/*
 * Apply a RELO_CALL relocation: splice the whole .text section onto the
 * end of @prog's instruction array (once, on first use) and patch the
 * call insn's imm to the relative offset of its target inside that
 * appended copy.
 *
 * Returns 0 on success or a negative LIBBPF_ERRNO__*/-ENOMEM code.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain call relocations into .text. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet. */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		/* Copy all of .text after the program's own insns. */
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		/* Remember where the appended .text copy begins. */
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/*
	 * imm already holds the target's offset within .text; adjust it
	 * to be relative to the call site inside this program.
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1205
1206static int
Wang Nan9d759a92015-11-27 08:47:35 +00001207bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00001208{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001209 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001210
1211 if (!prog || !prog->reloc_desc)
1212 return 0;
1213
1214 for (i = 0; i < prog->nr_reloc; i++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001215 if (prog->reloc_desc[i].type == RELO_LD64) {
1216 struct bpf_insn *insns = prog->insns;
1217 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001218
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001219 insn_idx = prog->reloc_desc[i].insn_idx;
1220 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001221
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001222 if (insn_idx >= (int)prog->insns_cnt) {
1223 pr_warning("relocation out of range: '%s'\n",
1224 prog->section_name);
1225 return -LIBBPF_ERRNO__RELOC;
1226 }
1227 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1228 insns[insn_idx].imm = obj->maps[map_idx].fd;
1229 } else {
1230 err = bpf_program__reloc_text(prog, obj,
1231 &prog->reloc_desc[i]);
1232 if (err)
1233 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001234 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00001235 }
1236
1237 zfree(&prog->reloc_desc);
1238 prog->nr_reloc = 0;
1239 return 0;
1240}
1241
1242
1243static int
1244bpf_object__relocate(struct bpf_object *obj)
1245{
1246 struct bpf_program *prog;
1247 size_t i;
1248 int err;
1249
1250 for (i = 0; i < obj->nr_programs; i++) {
1251 prog = &obj->programs[i];
1252
Wang Nan9d759a92015-11-27 08:47:35 +00001253 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001254 if (err) {
1255 pr_warning("failed to relocate '%s'\n",
1256 prog->section_name);
1257 return err;
1258 }
1259 }
1260 return 0;
1261}
1262
Wang Nan34090912015-07-01 02:14:02 +00001263static int bpf_object__collect_reloc(struct bpf_object *obj)
1264{
1265 int i, err;
1266
1267 if (!obj_elf_valid(obj)) {
1268 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001269 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001270 }
1271
1272 for (i = 0; i < obj->efile.nr_reloc; i++) {
1273 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1274 Elf_Data *data = obj->efile.reloc[i].data;
1275 int idx = shdr->sh_info;
1276 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00001277
1278 if (shdr->sh_type != SHT_REL) {
1279 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001280 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001281 }
1282
1283 prog = bpf_object__find_prog_by_idx(obj, idx);
1284 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001285 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001286 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001287 }
1288
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001289 err = bpf_program__collect_reloc(prog,
Wang Nan34090912015-07-01 02:14:02 +00001290 shdr, data,
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001291 obj);
Wang Nan34090912015-07-01 02:14:02 +00001292 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00001293 return err;
Wang Nan34090912015-07-01 02:14:02 +00001294 }
1295 return 0;
1296}
1297
/*
 * Load one BPF program into the kernel via bpf_load_program_xattr().
 *
 * On success *pfd receives the program fd and 0 is returned.  On failure
 * a negative LIBBPF_ERRNO__* code is returned, refined by probing:
 *  - non-empty verifier log  -> LIBBPF_ERRNO__VERIFY
 *  - too many instructions   -> LIBBPF_ERRNO__PROG2BIG
 *  - loads fine as a kprobe  -> LIBBPF_ERRNO__PROGTYPE (wrong type)
 *  - otherwise, with a log   -> LIBBPF_ERRNO__KVER (kernel too old?)
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* Log buffer is best-effort; loading proceeds without it. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* Verifier produced output: surface it and classify. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* Probe: does it load when treated as a kprobe? */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1369
/*
 * Load @prog into the kernel, producing one fd per instance.
 *
 * Without a preprocessor there is exactly one instance, loaded from
 * prog->insns.  With a preprocessor, it is invoked once per instance to
 * rewrite the instructions before each load; an instance returning no
 * instructions is skipped (fd -1).  On return the original instruction
 * buffer is always freed, success or not.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* Lazily set up the per-instance fd array (single instance). */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* Plain programs must have exactly one instance. */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* Preprocessor produced nothing: skip this instance. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loading was attempted. */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1452
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001453static bool bpf_program__is_function_storage(struct bpf_program *prog,
1454 struct bpf_object *obj)
1455{
1456 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1457}
1458
Wang Nan55cffde2015-07-01 02:14:07 +00001459static int
1460bpf_object__load_progs(struct bpf_object *obj)
1461{
1462 size_t i;
1463 int err;
1464
1465 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001466 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001467 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001468 err = bpf_program__load(&obj->programs[i],
1469 obj->license,
1470 obj->kern_version);
1471 if (err)
1472 return err;
1473 }
1474 return 0;
1475}
1476
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001477static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
Wang Nancb1e5e92015-07-01 02:13:57 +00001478{
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001479 switch (type) {
1480 case BPF_PROG_TYPE_SOCKET_FILTER:
1481 case BPF_PROG_TYPE_SCHED_CLS:
1482 case BPF_PROG_TYPE_SCHED_ACT:
1483 case BPF_PROG_TYPE_XDP:
1484 case BPF_PROG_TYPE_CGROUP_SKB:
1485 case BPF_PROG_TYPE_CGROUP_SOCK:
1486 case BPF_PROG_TYPE_LWT_IN:
1487 case BPF_PROG_TYPE_LWT_OUT:
1488 case BPF_PROG_TYPE_LWT_XMIT:
Mathieu Xhonneux004d4b22018-05-20 14:58:16 +01001489 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001490 case BPF_PROG_TYPE_SOCK_OPS:
1491 case BPF_PROG_TYPE_SK_SKB:
1492 case BPF_PROG_TYPE_CGROUP_DEVICE:
1493 case BPF_PROG_TYPE_SK_MSG:
1494 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
Sean Young6bdd5332018-05-27 12:24:10 +01001495 case BPF_PROG_TYPE_LIRC_MODE2:
Martin KaFai Lau6bc8529c2018-08-08 01:01:30 -07001496 case BPF_PROG_TYPE_SK_REUSEPORT:
Petar Penkovc22fbae2018-09-14 07:46:20 -07001497 case BPF_PROG_TYPE_FLOW_DISSECTOR:
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001498 return false;
1499 case BPF_PROG_TYPE_UNSPEC:
1500 case BPF_PROG_TYPE_KPROBE:
1501 case BPF_PROG_TYPE_TRACEPOINT:
1502 case BPF_PROG_TYPE_PERF_EVENT:
1503 case BPF_PROG_TYPE_RAW_TRACEPOINT:
1504 default:
1505 return true;
1506 }
1507}
1508
1509static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1510{
1511 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001512 pr_warning("%s doesn't provide kernel version\n",
1513 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001514 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001515 }
1516 return 0;
1517}
1518
/*
 * Core open routine shared by all bpf_object__open*() entry points:
 * parse the ELF image (from @path, or from @obj_buf when non-NULL),
 * collect programs/maps/relocations, and validate.
 *
 * Returns the new object, or an ERR_PTR-encoded negative error.
 * NOTE(review): CHECK_ERR's definition is not visible here — it
 * presumably stores the error in @err and jumps to the given label on
 * failure; confirm against its definition earlier in this file.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is no longer needed once parsing succeeded. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1547
John Fastabendc034a172018-10-15 11:19:55 -07001548struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1549 int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001550{
1551 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001552 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001553 return NULL;
1554
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001555 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001556
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001557 return __bpf_object__open(attr->file, NULL, 0,
John Fastabendc034a172018-10-15 11:19:55 -07001558 bpf_prog_type__needs_kver(attr->prog_type),
1559 flags);
1560}
1561
/* Public xattr open: same as the internal variant with zero flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, /* flags */ 0);
}
1566
1567struct bpf_object *bpf_object__open(const char *path)
1568{
1569 struct bpf_object_open_attr attr = {
1570 .file = path,
1571 .prog_type = BPF_PROG_TYPE_UNSPEC,
1572 };
1573
1574 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00001575}
1576
1577struct bpf_object *bpf_object__open_buffer(void *obj_buf,
Wang Nanacf860a2015-08-27 02:30:55 +00001578 size_t obj_buf_sz,
1579 const char *name)
Wang Nan6c956392015-07-01 02:13:54 +00001580{
Wang Nanacf860a2015-08-27 02:30:55 +00001581 char tmp_name[64];
1582
Wang Nan6c956392015-07-01 02:13:54 +00001583 /* param validation */
1584 if (!obj_buf || obj_buf_sz <= 0)
1585 return NULL;
1586
Wang Nanacf860a2015-08-27 02:30:55 +00001587 if (!name) {
1588 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1589 (unsigned long)obj_buf,
1590 (unsigned long)obj_buf_sz);
1591 tmp_name[sizeof(tmp_name) - 1] = '\0';
1592 name = tmp_name;
1593 }
1594 pr_debug("loading object '%s' from buffer\n",
1595 name);
Wang Nan6c956392015-07-01 02:13:54 +00001596
John Fastabendc034a172018-10-15 11:19:55 -07001597 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001598}
1599
Wang Nan52d33522015-07-01 02:14:04 +00001600int bpf_object__unload(struct bpf_object *obj)
1601{
1602 size_t i;
1603
1604 if (!obj)
1605 return -EINVAL;
1606
Wang Nan9d759a92015-11-27 08:47:35 +00001607 for (i = 0; i < obj->nr_maps; i++)
1608 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001609
Wang Nan55cffde2015-07-01 02:14:07 +00001610 for (i = 0; i < obj->nr_programs; i++)
1611 bpf_program__unload(&obj->programs[i]);
1612
Wang Nan52d33522015-07-01 02:14:04 +00001613 return 0;
1614}
1615
/*
 * Load @obj into the kernel: create maps, apply relocations, then load
 * all programs.  An object may be loaded only once; on any failure the
 * partially-loaded state is torn down via bpf_object__unload().
 * NOTE(review): CHECK_ERR's definition is not visible here — it
 * presumably stores the error in @err and jumps to the label on
 * failure; confirm against its definition earlier in this file.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* Mark first so a failed load still counts as an attempt. */
	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1640
Joe Stringerf3675402017-01-26 13:19:56 -08001641static int check_path(const char *path)
1642{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001643 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001644 struct statfs st_fs;
1645 char *dname, *dir;
1646 int err = 0;
1647
1648 if (path == NULL)
1649 return -EINVAL;
1650
1651 dname = strdup(path);
1652 if (dname == NULL)
1653 return -ENOMEM;
1654
1655 dir = dirname(dname);
1656 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001657 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001658 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001659 err = -errno;
1660 }
1661 free(dname);
1662
1663 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1664 pr_warning("specified path %s is not on BPF FS\n", path);
1665 err = -EINVAL;
1666 }
1667
1668 return err;
1669}
1670
1671int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1672 int instance)
1673{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001674 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001675 int err;
1676
1677 err = check_path(path);
1678 if (err)
1679 return err;
1680
1681 if (prog == NULL) {
1682 pr_warning("invalid program pointer\n");
1683 return -EINVAL;
1684 }
1685
1686 if (instance < 0 || instance >= prog->instances.nr) {
1687 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1688 instance, prog->section_name, prog->instances.nr);
1689 return -EINVAL;
1690 }
1691
1692 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001693 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001694 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001695 return -errno;
1696 }
1697 pr_debug("pinned program '%s'\n", path);
1698
1699 return 0;
1700}
1701
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001702int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1703 int instance)
1704{
1705 int err;
1706
1707 err = check_path(path);
1708 if (err)
1709 return err;
1710
1711 if (prog == NULL) {
1712 pr_warning("invalid program pointer\n");
1713 return -EINVAL;
1714 }
1715
1716 if (instance < 0 || instance >= prog->instances.nr) {
1717 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1718 instance, prog->section_name, prog->instances.nr);
1719 return -EINVAL;
1720 }
1721
1722 err = unlink(path);
1723 if (err != 0)
1724 return -errno;
1725 pr_debug("unpinned program '%s'\n", path);
1726
1727 return 0;
1728}
1729
Joe Stringerf3675402017-01-26 13:19:56 -08001730static int make_dir(const char *path)
1731{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001732 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001733 int err = 0;
1734
1735 if (mkdir(path, 0700) && errno != EEXIST)
1736 err = -errno;
1737
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001738 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001739 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001740 pr_warning("failed to mkdir %s: %s\n", path, cp);
1741 }
Joe Stringerf3675402017-01-26 13:19:56 -08001742 return err;
1743}
1744
1745int bpf_program__pin(struct bpf_program *prog, const char *path)
1746{
1747 int i, err;
1748
1749 err = check_path(path);
1750 if (err)
1751 return err;
1752
1753 if (prog == NULL) {
1754 pr_warning("invalid program pointer\n");
1755 return -EINVAL;
1756 }
1757
1758 if (prog->instances.nr <= 0) {
1759 pr_warning("no instances of prog %s to pin\n",
1760 prog->section_name);
1761 return -EINVAL;
1762 }
1763
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08001764 if (prog->instances.nr == 1) {
1765 /* don't create subdirs when pinning single instance */
1766 return bpf_program__pin_instance(prog, path, 0);
1767 }
1768
Joe Stringerf3675402017-01-26 13:19:56 -08001769 err = make_dir(path);
1770 if (err)
1771 return err;
1772
1773 for (i = 0; i < prog->instances.nr; i++) {
1774 char buf[PATH_MAX];
1775 int len;
1776
1777 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001778 if (len < 0) {
1779 err = -EINVAL;
1780 goto err_unpin;
1781 } else if (len >= PATH_MAX) {
1782 err = -ENAMETOOLONG;
1783 goto err_unpin;
1784 }
1785
1786 err = bpf_program__pin_instance(prog, buf, i);
1787 if (err)
1788 goto err_unpin;
1789 }
1790
1791 return 0;
1792
1793err_unpin:
1794 for (i = i - 1; i >= 0; i--) {
1795 char buf[PATH_MAX];
1796 int len;
1797
1798 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1799 if (len < 0)
1800 continue;
1801 else if (len >= PATH_MAX)
1802 continue;
1803
1804 bpf_program__unpin_instance(prog, buf, i);
1805 }
1806
1807 rmdir(path);
1808
1809 return err;
1810}
1811
1812int bpf_program__unpin(struct bpf_program *prog, const char *path)
1813{
1814 int i, err;
1815
1816 err = check_path(path);
1817 if (err)
1818 return err;
1819
1820 if (prog == NULL) {
1821 pr_warning("invalid program pointer\n");
1822 return -EINVAL;
1823 }
1824
1825 if (prog->instances.nr <= 0) {
1826 pr_warning("no instances of prog %s to pin\n",
1827 prog->section_name);
1828 return -EINVAL;
1829 }
1830
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08001831 if (prog->instances.nr == 1) {
1832 /* don't create subdirs when pinning single instance */
1833 return bpf_program__unpin_instance(prog, path, 0);
1834 }
1835
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001836 for (i = 0; i < prog->instances.nr; i++) {
1837 char buf[PATH_MAX];
1838 int len;
1839
1840 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08001841 if (len < 0)
1842 return -EINVAL;
1843 else if (len >= PATH_MAX)
1844 return -ENAMETOOLONG;
1845
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001846 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08001847 if (err)
1848 return err;
1849 }
1850
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001851 err = rmdir(path);
1852 if (err)
1853 return -errno;
1854
Joe Stringerf3675402017-01-26 13:19:56 -08001855 return 0;
1856}
1857
Joe Stringerb6989f32017-01-26 13:19:57 -08001858int bpf_map__pin(struct bpf_map *map, const char *path)
1859{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001860 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08001861 int err;
1862
1863 err = check_path(path);
1864 if (err)
1865 return err;
1866
1867 if (map == NULL) {
1868 pr_warning("invalid map pointer\n");
1869 return -EINVAL;
1870 }
1871
1872 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001873 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001874 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08001875 return -errno;
1876 }
1877
1878 pr_debug("pinned map '%s'\n", path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001879
Joe Stringerb6989f32017-01-26 13:19:57 -08001880 return 0;
1881}
1882
/* Remove the pin for @map at @path. */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int ret;

	ret = check_path(path);
	if (ret)
		return ret;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path) != 0)
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
1903
1904int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
1905{
Joe Stringerd5148d82017-01-26 13:19:58 -08001906 struct bpf_map *map;
1907 int err;
1908
1909 if (!obj)
1910 return -ENOENT;
1911
1912 if (!obj->loaded) {
1913 pr_warning("object not yet loaded; load it first\n");
1914 return -ENOENT;
1915 }
1916
1917 err = make_dir(path);
1918 if (err)
1919 return err;
1920
1921 bpf_map__for_each(map, obj) {
1922 char buf[PATH_MAX];
1923 int len;
1924
1925 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1926 bpf_map__name(map));
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001927 if (len < 0) {
1928 err = -EINVAL;
1929 goto err_unpin_maps;
1930 } else if (len >= PATH_MAX) {
1931 err = -ENAMETOOLONG;
1932 goto err_unpin_maps;
1933 }
1934
1935 err = bpf_map__pin(map, buf);
1936 if (err)
1937 goto err_unpin_maps;
1938 }
1939
1940 return 0;
1941
1942err_unpin_maps:
1943 while ((map = bpf_map__prev(map, obj))) {
1944 char buf[PATH_MAX];
1945 int len;
1946
1947 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1948 bpf_map__name(map));
1949 if (len < 0)
1950 continue;
1951 else if (len >= PATH_MAX)
1952 continue;
1953
1954 bpf_map__unpin(map, buf);
1955 }
1956
1957 return err;
1958}
1959
1960int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
1961{
1962 struct bpf_map *map;
1963 int err;
1964
1965 if (!obj)
1966 return -ENOENT;
1967
1968 bpf_map__for_each(map, obj) {
1969 char buf[PATH_MAX];
1970 int len;
1971
1972 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1973 bpf_map__name(map));
Joe Stringerd5148d82017-01-26 13:19:58 -08001974 if (len < 0)
1975 return -EINVAL;
1976 else if (len >= PATH_MAX)
1977 return -ENAMETOOLONG;
1978
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001979 err = bpf_map__unpin(map, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08001980 if (err)
1981 return err;
1982 }
1983
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001984 return 0;
1985}
1986
1987int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
1988{
1989 struct bpf_program *prog;
1990 int err;
1991
1992 if (!obj)
1993 return -ENOENT;
1994
1995 if (!obj->loaded) {
1996 pr_warning("object not yet loaded; load it first\n");
1997 return -ENOENT;
1998 }
1999
2000 err = make_dir(path);
2001 if (err)
2002 return err;
2003
2004 bpf_object__for_each_program(prog, obj) {
2005 char buf[PATH_MAX];
2006 int len;
2007
2008 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2009 prog->section_name);
2010 if (len < 0) {
2011 err = -EINVAL;
2012 goto err_unpin_programs;
2013 } else if (len >= PATH_MAX) {
2014 err = -ENAMETOOLONG;
2015 goto err_unpin_programs;
2016 }
2017
2018 err = bpf_program__pin(prog, buf);
2019 if (err)
2020 goto err_unpin_programs;
2021 }
2022
2023 return 0;
2024
2025err_unpin_programs:
2026 while ((prog = bpf_program__prev(prog, obj))) {
2027 char buf[PATH_MAX];
2028 int len;
2029
2030 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2031 prog->section_name);
2032 if (len < 0)
2033 continue;
2034 else if (len >= PATH_MAX)
2035 continue;
2036
2037 bpf_program__unpin(prog, buf);
2038 }
2039
2040 return err;
2041}
2042
2043int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2044{
2045 struct bpf_program *prog;
2046 int err;
2047
2048 if (!obj)
2049 return -ENOENT;
2050
Joe Stringerd5148d82017-01-26 13:19:58 -08002051 bpf_object__for_each_program(prog, obj) {
2052 char buf[PATH_MAX];
2053 int len;
2054
2055 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2056 prog->section_name);
2057 if (len < 0)
2058 return -EINVAL;
2059 else if (len >= PATH_MAX)
2060 return -ENAMETOOLONG;
2061
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002062 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002063 if (err)
2064 return err;
2065 }
2066
2067 return 0;
2068}
2069
/* Pin maps and programs of @obj under @path; all-or-nothing. */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err = bpf_object__pin_maps(obj, path);

	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (!err)
		return 0;

	/* roll the maps back on program-pin failure */
	bpf_object__unpin_maps(obj, path);
	return err;
}
2086
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002087void bpf_object__close(struct bpf_object *obj)
2088{
Wang Nana5b8bd42015-07-01 02:14:00 +00002089 size_t i;
2090
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002091 if (!obj)
2092 return;
2093
Wang Nan10931d22016-11-26 07:03:26 +00002094 if (obj->clear_priv)
2095 obj->clear_priv(obj, obj->priv);
2096
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002097 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00002098 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002099 btf__free(obj->btf);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002100
Wang Nan9d759a92015-11-27 08:47:35 +00002101 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00002102 zfree(&obj->maps[i].name);
Wang Nan9d759a92015-11-27 08:47:35 +00002103 if (obj->maps[i].clear_priv)
2104 obj->maps[i].clear_priv(&obj->maps[i],
2105 obj->maps[i].priv);
2106 obj->maps[i].priv = NULL;
2107 obj->maps[i].clear_priv = NULL;
2108 }
2109 zfree(&obj->maps);
2110 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00002111
2112 if (obj->programs && obj->nr_programs) {
2113 for (i = 0; i < obj->nr_programs; i++)
2114 bpf_program__exit(&obj->programs[i]);
2115 }
2116 zfree(&obj->programs);
2117
Wang Nan9a208ef2015-07-01 02:14:10 +00002118 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002119 free(obj);
2120}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002121
Wang Nan9a208ef2015-07-01 02:14:10 +00002122struct bpf_object *
2123bpf_object__next(struct bpf_object *prev)
2124{
2125 struct bpf_object *next;
2126
2127 if (!prev)
2128 next = list_first_entry(&bpf_objects_list,
2129 struct bpf_object,
2130 list);
2131 else
2132 next = list_next_entry(prev, list);
2133
2134 /* Empty list is noticed here so don't need checking on entry. */
2135 if (&next->list == &bpf_objects_list)
2136 return NULL;
2137
2138 return next;
2139}
2140
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002141const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00002142{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002143 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00002144}
2145
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002146unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00002147{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002148 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00002149}
2150
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002151int bpf_object__btf_fd(const struct bpf_object *obj)
2152{
2153 return obj->btf ? btf__fd(obj->btf) : -1;
2154}
2155
Wang Nan10931d22016-11-26 07:03:26 +00002156int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2157 bpf_object_clear_priv_t clear_priv)
2158{
2159 if (obj->priv && obj->clear_priv)
2160 obj->clear_priv(obj, obj->priv);
2161
2162 obj->priv = priv;
2163 obj->clear_priv = clear_priv;
2164 return 0;
2165}
2166
2167void *bpf_object__priv(struct bpf_object *obj)
2168{
2169 return obj ? obj->priv : ERR_PTR(-EINVAL);
2170}
2171
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002172static struct bpf_program *
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002173__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, int i)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002174{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002175 ssize_t idx;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002176
2177 if (!obj->programs)
2178 return NULL;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002179
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002180 if (p->obj != obj) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002181 pr_warning("error: program handler doesn't match object\n");
2182 return NULL;
2183 }
2184
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002185 idx = (p - obj->programs) + i;
2186 if (idx >= obj->nr_programs || idx < 0)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002187 return NULL;
2188 return &obj->programs[idx];
2189}
2190
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002191struct bpf_program *
2192bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2193{
2194 struct bpf_program *prog = prev;
2195
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002196 if (prev == NULL)
2197 return obj->programs;
2198
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002199 do {
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002200 prog = __bpf_program__iter(prog, obj, 1);
2201 } while (prog && bpf_program__is_function_storage(prog, obj));
2202
2203 return prog;
2204}
2205
2206struct bpf_program *
2207bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2208{
2209 struct bpf_program *prog = next;
2210
2211 if (next == NULL) {
2212 if (!obj->nr_programs)
2213 return NULL;
2214 return obj->programs + obj->nr_programs - 1;
2215 }
2216
2217 do {
2218 prog = __bpf_program__iter(prog, obj, -1);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002219 } while (prog && bpf_program__is_function_storage(prog, obj));
2220
2221 return prog;
2222}
2223
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002224int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2225 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002226{
2227 if (prog->priv && prog->clear_priv)
2228 prog->clear_priv(prog, prog->priv);
2229
2230 prog->priv = priv;
2231 prog->clear_priv = clear_priv;
2232 return 0;
2233}
2234
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002235void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002236{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002237 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002238}
2239
Jakub Kicinski9aba3612018-06-28 14:41:37 -07002240void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
2241{
2242 prog->prog_ifindex = ifindex;
2243}
2244
Namhyung Kim715f8db2015-11-03 20:21:05 +09002245const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002246{
2247 const char *title;
2248
2249 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09002250 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002251 title = strdup(title);
2252 if (!title) {
2253 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00002254 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002255 }
2256 }
2257
2258 return title;
2259}
2260
2261int bpf_program__fd(struct bpf_program *prog)
2262{
Wang Nanb5805632015-11-16 12:10:09 +00002263 return bpf_program__nth_fd(prog, 0);
2264}
2265
2266int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2267 bpf_program_prep_t prep)
2268{
2269 int *instances_fds;
2270
2271 if (nr_instances <= 0 || !prep)
2272 return -EINVAL;
2273
2274 if (prog->instances.nr > 0 || prog->instances.fds) {
2275 pr_warning("Can't set pre-processor after loading\n");
2276 return -EINVAL;
2277 }
2278
2279 instances_fds = malloc(sizeof(int) * nr_instances);
2280 if (!instances_fds) {
2281 pr_warning("alloc memory failed for fds\n");
2282 return -ENOMEM;
2283 }
2284
2285 /* fill all fd with -1 */
2286 memset(instances_fds, -1, sizeof(int) * nr_instances);
2287
2288 prog->instances.nr = nr_instances;
2289 prog->instances.fds = instances_fds;
2290 prog->preprocessor = prep;
2291 return 0;
2292}
2293
2294int bpf_program__nth_fd(struct bpf_program *prog, int n)
2295{
2296 int fd;
2297
Jakub Kicinski1e960042018-07-26 14:32:18 -07002298 if (!prog)
2299 return -EINVAL;
2300
Wang Nanb5805632015-11-16 12:10:09 +00002301 if (n >= prog->instances.nr || n < 0) {
2302 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2303 n, prog->section_name, prog->instances.nr);
2304 return -EINVAL;
2305 }
2306
2307 fd = prog->instances.fds[n];
2308 if (fd < 0) {
2309 pr_warning("%dth instance of program '%s' is invalid\n",
2310 n, prog->section_name);
2311 return -ENOENT;
2312 }
2313
2314 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002315}
Wang Nan9d759a92015-11-27 08:47:35 +00002316
/* Set the BPF program type of @prog. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

/* True iff @prog is non-NULL and has exactly program type @type. */
static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

/* Generate the public bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * convenience pair for each program type listed below.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

/* Record the attach type to be passed as expected_attach_type at load. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2356
/* One section_names[] entry: section-name prefix, its length, and the
 * program/attach type information bound to that prefix.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* Prefix-match table consulted by libbpf_prog_type_by_name() and
 * libbpf_attach_type_by_name() below.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002442
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002443int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2444 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00002445{
2446 int i;
2447
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002448 if (!name)
2449 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00002450
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002451 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2452 if (strncmp(name, section_names[i].sec, section_names[i].len))
2453 continue;
2454 *prog_type = section_names[i].prog_type;
2455 *expected_attach_type = section_names[i].expected_attach_type;
2456 return 0;
2457 }
2458 return -EINVAL;
2459}
Roman Gushchin583c9002017-12-13 15:18:51 +00002460
Andrey Ignatov956b6202018-09-26 15:24:53 -07002461int libbpf_attach_type_by_name(const char *name,
2462 enum bpf_attach_type *attach_type)
2463{
2464 int i;
2465
2466 if (!name)
2467 return -EINVAL;
2468
2469 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2470 if (strncmp(name, section_names[i].sec, section_names[i].len))
2471 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07002472 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07002473 return -EINVAL;
2474 *attach_type = section_names[i].attach_type;
2475 return 0;
2476 }
2477 return -EINVAL;
2478}
2479
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002480static int
2481bpf_program__identify_section(struct bpf_program *prog,
2482 enum bpf_prog_type *prog_type,
2483 enum bpf_attach_type *expected_attach_type)
2484{
2485 return libbpf_prog_type_by_name(prog->section_name, prog_type,
2486 expected_attach_type);
Roman Gushchin583c9002017-12-13 15:18:51 +00002487}
2488
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002489int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002490{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002491 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002492}
2493
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002494const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002495{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002496 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002497}
2498
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002499const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002500{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002501 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002502}
2503
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002504__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002505{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002506 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002507}
2508
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002509__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002510{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002511 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002512}
2513
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002514int bpf_map__set_priv(struct bpf_map *map, void *priv,
2515 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002516{
2517 if (!map)
2518 return -EINVAL;
2519
2520 if (map->priv) {
2521 if (map->clear_priv)
2522 map->clear_priv(map, map->priv);
2523 }
2524
2525 map->priv = priv;
2526 map->clear_priv = clear_priv;
2527 return 0;
2528}
2529
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002530void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002531{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002532 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002533}
2534
Jakub Kicinskif83fb222018-07-10 14:43:01 -07002535bool bpf_map__is_offload_neutral(struct bpf_map *map)
2536{
2537 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2538}
2539
Jakub Kicinski9aba3612018-06-28 14:41:37 -07002540void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
2541{
2542 map->map_ifindex = ifindex;
2543}
2544
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002545static struct bpf_map *
2546__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00002547{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002548 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00002549 struct bpf_map *s, *e;
2550
2551 if (!obj || !obj->maps)
2552 return NULL;
2553
2554 s = obj->maps;
2555 e = obj->maps + obj->nr_maps;
2556
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002557 if ((m < s) || (m >= e)) {
Wang Nan9d759a92015-11-27 08:47:35 +00002558 pr_warning("error in %s: map handler doesn't belong to object\n",
2559 __func__);
2560 return NULL;
2561 }
2562
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002563 idx = (m - obj->maps) + i;
2564 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00002565 return NULL;
2566 return &obj->maps[idx];
2567}
Wang Nan561bbcc2015-11-27 08:47:36 +00002568
2569struct bpf_map *
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002570bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2571{
2572 if (prev == NULL)
2573 return obj->maps;
2574
2575 return __bpf_map__iter(prev, obj, 1);
2576}
2577
2578struct bpf_map *
2579bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2580{
2581 if (next == NULL) {
2582 if (!obj->nr_maps)
2583 return NULL;
2584 return obj->maps + obj->nr_maps - 1;
2585 }
2586
2587 return __bpf_map__iter(next, obj, -1);
2588}
2589
2590struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002591bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002592{
2593 struct bpf_map *pos;
2594
2595 bpf_map__for_each(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002596 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002597 return pos;
2598 }
2599 return NULL;
2600}
Wang Nan5a6acad2016-11-26 07:03:27 +00002601
2602struct bpf_map *
2603bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2604{
2605 int i;
2606
2607 for (i = 0; i < obj->nr_maps; i++) {
2608 if (obj->maps[i].offset == offset)
2609 return &obj->maps[i];
2610 }
2611 return ERR_PTR(-ENOENT);
2612}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002613
2614long libbpf_get_error(const void *ptr)
2615{
2616 if (IS_ERR(ptr))
2617 return PTR_ERR(ptr);
2618 return 0;
2619}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002620
2621int bpf_prog_load(const char *file, enum bpf_prog_type type,
2622 struct bpf_object **pobj, int *prog_fd)
2623{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002624 struct bpf_prog_load_attr attr;
2625
2626 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2627 attr.file = file;
2628 attr.prog_type = type;
2629 attr.expected_attach_type = 0;
2630
2631 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2632}
2633
2634int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2635 struct bpf_object **pobj, int *prog_fd)
2636{
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002637 struct bpf_object_open_attr open_attr = {
2638 .file = attr->file,
2639 .prog_type = attr->prog_type,
2640 };
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002641 struct bpf_program *prog, *first_prog = NULL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002642 enum bpf_attach_type expected_attach_type;
2643 enum bpf_prog_type prog_type;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002644 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07002645 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002646 int err;
2647
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002648 if (!attr)
2649 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07002650 if (!attr->file)
2651 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002652
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002653 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07002654 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07002655 return -ENOENT;
2656
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002657 bpf_object__for_each_program(prog, obj) {
2658 /*
2659 * If type is not specified, try to guess it based on
2660 * section name.
2661 */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002662 prog_type = attr->prog_type;
David Beckettf0307a72018-05-16 14:02:49 -07002663 prog->prog_ifindex = attr->ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002664 expected_attach_type = attr->expected_attach_type;
2665 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002666 err = bpf_program__identify_section(prog, &prog_type,
2667 &expected_attach_type);
2668 if (err < 0) {
2669 pr_warning("failed to guess program type based on section name %s\n",
2670 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002671 bpf_object__close(obj);
2672 return -EINVAL;
2673 }
2674 }
2675
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002676 bpf_program__set_type(prog, prog_type);
2677 bpf_program__set_expected_attach_type(prog,
2678 expected_attach_type);
2679
Taeung Song69495d22018-09-03 08:30:07 +09002680 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002681 first_prog = prog;
2682 }
2683
David Beckettf0307a72018-05-16 14:02:49 -07002684 bpf_map__for_each(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07002685 if (!bpf_map__is_offload_neutral(map))
2686 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07002687 }
2688
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002689 if (!first_prog) {
2690 pr_warning("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07002691 bpf_object__close(obj);
2692 return -ENOENT;
2693 }
2694
John Fastabend6f6d33f2017-08-15 22:34:22 -07002695 err = bpf_object__load(obj);
2696 if (err) {
2697 bpf_object__close(obj);
2698 return -EINVAL;
2699 }
2700
2701 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002702 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07002703 return 0;
2704}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002705
/* Drain pending records from one perf ring buffer mmap.
 *
 * @mmap_mem:  base of the perf mmap region (the control page).
 * @mmap_size: size of the data area in bytes (must be a power of two,
 *             since indexing uses `data_tail & (mmap_size - 1)` — TODO
 *             confirm callers enforce this).
 * @page_size: system page size; the data area starts one page past the
 *             control page.
 * @copy_mem/@copy_size: caller-owned scratch buffer (pointer + capacity),
 *             grown on demand to reassemble records that wrap around the
 *             end of the ring; caller frees *copy_mem when done.
 * @fn:        callback invoked once per record; iteration stops when it
 *             returns anything other than LIBBPF_PERF_EVENT_CONT.
 * @private_data: opaque cookie passed through to @fn.
 *
 * Returns the last callback result, or LIBBPF_PERF_EVENT_ERROR on
 * allocation failure.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	/* NOTE(review): ring_buffer_read_head() presumably includes the
	 * load-acquire pairing with the kernel's publish of data_head —
	 * confirm against tools/include/linux/ring_buffer.h.
	 */
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	/* Data area begins one page after the control page. */
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	/* head == tail means the ring is empty; head/tail are free-running
	 * counters, only masked when used as offsets.
	 */
	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: reassemble it
		 * contiguously in the caller's scratch buffer.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			/* Grow the scratch buffer if this record won't fit. */
			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			/* Tail piece from the end of the ring, head piece
			 * from its start.
			 */
			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		/* Consume the record even if the callback stops iteration,
		 * so it is not redelivered on the next call.
		 */
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new tail so the kernel may reuse the consumed space
	 * (ring_buffer_write_tail() presumably orders prior reads before
	 * the store — confirm against tools/include/linux/ring_buffer.h).
	 */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}