blob: e827542ffa3aff9e7d1582732a2943237713c783 [file] [log] [blame]
Alexei Starovoitov1bc38b82018-10-05 16:40:00 -07001// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
Eric Leblond6061a3d2018-01-30 21:55:03 +01002
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan1b76c132015-07-01 02:13:51 +000010 */
11
Jakub Kicinski531b0142018-07-10 14:43:05 -070012#define _GNU_SOURCE
Wang Nan1b76c132015-07-01 02:13:51 +000013#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000014#include <stdio.h>
15#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080016#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000017#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000018#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000019#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000020#include <fcntl.h>
21#include <errno.h>
Wang Nan1b76c132015-07-01 02:13:51 +000022#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080023#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000024#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000025#include <linux/bpf.h>
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -070026#include <linux/btf.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000027#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080028#include <linux/limits.h>
Yonghong Song438363c2018-10-09 16:14:47 -070029#include <linux/perf_event.h>
Daniel Borkmanna64af0e2018-10-19 15:51:03 +020030#include <linux/ring_buffer.h>
Joe Stringerf3675402017-01-26 13:19:56 -080031#include <sys/stat.h>
32#include <sys/types.h>
33#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070034#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000035#include <libelf.h>
36#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000037
38#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000039#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070040#include "btf.h"
Arnaldo Carvalho de Melo6d419072018-09-14 16:47:14 -030041#include "str_error.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000042
Wang Nan9b161372016-07-18 06:01:08 +000043#ifndef EM_BPF
44#define EM_BPF 247
45#endif
46
Joe Stringerf3675402017-01-26 13:19:56 -080047#ifndef BPF_FS_MAGIC
48#define BPF_FS_MAGIC 0xcafe4a11
49#endif
50
Wang Nanb3f59d62015-07-01 02:13:52 +000051#define __printf(a, b) __attribute__((format(printf, a, b)))
52
53__printf(1, 2)
54static int __base_pr(const char *format, ...)
55{
56 va_list args;
57 int err;
58
59 va_start(args, format);
60 err = vfprintf(stderr, format, args);
61 va_end(args);
62 return err;
63}
64
65static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
66static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
67static __printf(1, 2) libbpf_print_fn_t __pr_debug;
68
69#define __pr(func, fmt, ...) \
70do { \
71 if ((func)) \
72 (func)("libbpf: " fmt, ##__VA_ARGS__); \
73} while (0)
74
75#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
76#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
77#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
78
79void libbpf_set_print(libbpf_print_fn_t warn,
80 libbpf_print_fn_t info,
81 libbpf_print_fn_t debug)
82{
83 __pr_warning = warn;
84 __pr_info = info;
85 __pr_debug = debug;
86}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000087
Wang Nan6371ca3b2015-11-06 13:49:37 +000088#define STRERR_BUFSIZE 128
89
Wang Nan6371ca3b2015-11-06 13:49:37 +000090#define CHECK_ERR(action, err, out) do { \
91 err = action; \
92 if (err) \
93 goto out; \
94} while(0)
95
96
Wang Nan1a5e3fb2015-07-01 02:13:53 +000097/* Copied from tools/perf/util/util.h */
98#ifndef zfree
99# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
100#endif
101
102#ifndef zclose
103# define zclose(fd) ({ \
104 int ___err = 0; \
105 if ((fd) >= 0) \
106 ___err = close((fd)); \
107 fd = -1; \
108 ___err; })
109#endif
110
111#ifdef HAVE_LIBELF_MMAP_SUPPORT
112# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
113#else
114# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
115#endif
116
Wang Nana5b8bd42015-07-01 02:14:00 +0000117/*
118 * bpf_prog should be a better name but it has been used in
119 * linux/filter.h.
120 */
121struct bpf_program {
122 /* Index in elf obj file, for relocation use. */
123 int idx;
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700124 char *name;
David Beckettf0307a72018-05-16 14:02:49 -0700125 int prog_ifindex;
Wang Nana5b8bd42015-07-01 02:14:00 +0000126 char *section_name;
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800127 /* section_name with / replaced by _; makes recursive pinning
128 * in bpf_object__pin_programs easier
129 */
130 char *pin_name;
Wang Nana5b8bd42015-07-01 02:14:00 +0000131 struct bpf_insn *insns;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800132 size_t insns_cnt, main_prog_cnt;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000133 enum bpf_prog_type type;
Wang Nan34090912015-07-01 02:14:02 +0000134
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800135 struct reloc_desc {
136 enum {
137 RELO_LD64,
138 RELO_CALL,
139 } type;
Wang Nan34090912015-07-01 02:14:02 +0000140 int insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800141 union {
142 int map_idx;
143 int text_off;
144 };
Wang Nan34090912015-07-01 02:14:02 +0000145 } *reloc_desc;
146 int nr_reloc;
Wang Nan55cffde2015-07-01 02:14:07 +0000147
Wang Nanb5805632015-11-16 12:10:09 +0000148 struct {
149 int nr;
150 int *fds;
151 } instances;
152 bpf_program_prep_t preprocessor;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000153
154 struct bpf_object *obj;
155 void *priv;
156 bpf_program_clear_priv_t clear_priv;
Andrey Ignatovd7be1432018-03-30 15:08:01 -0700157
158 enum bpf_attach_type expected_attach_type;
Wang Nana5b8bd42015-07-01 02:14:00 +0000159};
160
Wang Nan9d759a92015-11-27 08:47:35 +0000161struct bpf_map {
162 int fd;
Wang Nan561bbcc2015-11-27 08:47:36 +0000163 char *name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000164 size_t offset;
David Beckettf0307a72018-05-16 14:02:49 -0700165 int map_ifindex;
Wang Nan9d759a92015-11-27 08:47:35 +0000166 struct bpf_map_def def;
Martin KaFai Lau5b891af2018-07-24 08:40:21 -0700167 __u32 btf_key_type_id;
168 __u32 btf_value_type_id;
Wang Nan9d759a92015-11-27 08:47:35 +0000169 void *priv;
170 bpf_map_clear_priv_t clear_priv;
171};
172
Wang Nan9a208ef2015-07-01 02:14:10 +0000173static LIST_HEAD(bpf_objects_list);
174
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000175struct bpf_object {
Wang Nancb1e5e92015-07-01 02:13:57 +0000176 char license[64];
Yonghong Song438363c2018-10-09 16:14:47 -0700177 __u32 kern_version;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000178
Wang Nana5b8bd42015-07-01 02:14:00 +0000179 struct bpf_program *programs;
180 size_t nr_programs;
Wang Nan9d759a92015-11-27 08:47:35 +0000181 struct bpf_map *maps;
182 size_t nr_maps;
183
Wang Nan52d33522015-07-01 02:14:04 +0000184 bool loaded;
Jakub Kicinski9a94f272018-06-28 14:41:38 -0700185 bool has_pseudo_calls;
Wang Nana5b8bd42015-07-01 02:14:00 +0000186
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000187 /*
188 * Information when doing elf related work. Only valid if fd
189 * is valid.
190 */
191 struct {
192 int fd;
Wang Nan6c956392015-07-01 02:13:54 +0000193 void *obj_buf;
194 size_t obj_buf_sz;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000195 Elf *elf;
196 GElf_Ehdr ehdr;
Wang Nanbec7d682015-07-01 02:13:59 +0000197 Elf_Data *symbols;
Wang Nan77ba9a52015-12-08 02:25:30 +0000198 size_t strtabidx;
Wang Nanb62f06e2015-07-01 02:14:01 +0000199 struct {
200 GElf_Shdr shdr;
201 Elf_Data *data;
202 } *reloc;
203 int nr_reloc;
Wang Nan666810e2016-01-25 09:55:49 +0000204 int maps_shndx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800205 int text_shndx;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000206 } efile;
Wang Nan9a208ef2015-07-01 02:14:10 +0000207 /*
208 * All loaded bpf_object is linked in a list, which is
209 * hidden to caller. bpf_objects__<func> handlers deal with
210 * all objects.
211 */
212 struct list_head list;
Wang Nan10931d22016-11-26 07:03:26 +0000213
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700214 struct btf *btf;
215
Wang Nan10931d22016-11-26 07:03:26 +0000216 void *priv;
217 bpf_object_clear_priv_t clear_priv;
218
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000219 char path[];
220};
221#define obj_elf_valid(o) ((o)->efile.elf)
222
Joe Stringer29cd77f2018-10-02 13:35:39 -0700223void bpf_program__unload(struct bpf_program *prog)
Wang Nan55cffde2015-07-01 02:14:07 +0000224{
Wang Nanb5805632015-11-16 12:10:09 +0000225 int i;
226
Wang Nan55cffde2015-07-01 02:14:07 +0000227 if (!prog)
228 return;
229
Wang Nanb5805632015-11-16 12:10:09 +0000230 /*
231 * If the object is opened but the program was never loaded,
232 * it is possible that prog->instances.nr == -1.
233 */
234 if (prog->instances.nr > 0) {
235 for (i = 0; i < prog->instances.nr; i++)
236 zclose(prog->instances.fds[i]);
237 } else if (prog->instances.nr != -1) {
238 pr_warning("Internal error: instances.nr is %d\n",
239 prog->instances.nr);
240 }
241
242 prog->instances.nr = -1;
243 zfree(&prog->instances.fds);
Wang Nan55cffde2015-07-01 02:14:07 +0000244}
245
Wang Nana5b8bd42015-07-01 02:14:00 +0000246static void bpf_program__exit(struct bpf_program *prog)
247{
248 if (!prog)
249 return;
250
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000251 if (prog->clear_priv)
252 prog->clear_priv(prog, prog->priv);
253
254 prog->priv = NULL;
255 prog->clear_priv = NULL;
256
Wang Nan55cffde2015-07-01 02:14:07 +0000257 bpf_program__unload(prog);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700258 zfree(&prog->name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000259 zfree(&prog->section_name);
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800260 zfree(&prog->pin_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000261 zfree(&prog->insns);
Wang Nan34090912015-07-01 02:14:02 +0000262 zfree(&prog->reloc_desc);
263
264 prog->nr_reloc = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +0000265 prog->insns_cnt = 0;
266 prog->idx = -1;
267}
268
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800269static char *__bpf_program__pin_name(struct bpf_program *prog)
270{
271 char *name, *p;
272
273 name = p = strdup(prog->section_name);
274 while ((p = strchr(p, '/')))
275 *p = '_';
276
277 return name;
278}
279
Wang Nana5b8bd42015-07-01 02:14:00 +0000280static int
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700281bpf_program__init(void *data, size_t size, char *section_name, int idx,
282 struct bpf_program *prog)
Wang Nana5b8bd42015-07-01 02:14:00 +0000283{
284 if (size < sizeof(struct bpf_insn)) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700285 pr_warning("corrupted section '%s'\n", section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000286 return -EINVAL;
287 }
288
289 bzero(prog, sizeof(*prog));
290
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700291 prog->section_name = strdup(section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000292 if (!prog->section_name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100293 pr_warning("failed to alloc name for prog under section(%d) %s\n",
294 idx, section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000295 goto errout;
296 }
297
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800298 prog->pin_name = __bpf_program__pin_name(prog);
299 if (!prog->pin_name) {
300 pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
301 idx, section_name);
302 goto errout;
303 }
304
Wang Nana5b8bd42015-07-01 02:14:00 +0000305 prog->insns = malloc(size);
306 if (!prog->insns) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700307 pr_warning("failed to alloc insns for prog under section %s\n",
308 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000309 goto errout;
310 }
311 prog->insns_cnt = size / sizeof(struct bpf_insn);
312 memcpy(prog->insns, data,
313 prog->insns_cnt * sizeof(struct bpf_insn));
314 prog->idx = idx;
Wang Nanb5805632015-11-16 12:10:09 +0000315 prog->instances.fds = NULL;
316 prog->instances.nr = -1;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000317 prog->type = BPF_PROG_TYPE_KPROBE;
Wang Nana5b8bd42015-07-01 02:14:00 +0000318
319 return 0;
320errout:
321 bpf_program__exit(prog);
322 return -ENOMEM;
323}
324
325static int
326bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700327 char *section_name, int idx)
Wang Nana5b8bd42015-07-01 02:14:00 +0000328{
329 struct bpf_program prog, *progs;
330 int nr_progs, err;
331
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700332 err = bpf_program__init(data, size, section_name, idx, &prog);
Wang Nana5b8bd42015-07-01 02:14:00 +0000333 if (err)
334 return err;
335
336 progs = obj->programs;
337 nr_progs = obj->nr_programs;
338
Jakub Kicinski531b0142018-07-10 14:43:05 -0700339 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
Wang Nana5b8bd42015-07-01 02:14:00 +0000340 if (!progs) {
341 /*
342 * In this case the original obj->programs
343 * is still valid, so don't need special treat for
344 * bpf_close_object().
345 */
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700346 pr_warning("failed to alloc a new program under section '%s'\n",
347 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000348 bpf_program__exit(&prog);
349 return -ENOMEM;
350 }
351
352 pr_debug("found program %s\n", prog.section_name);
353 obj->programs = progs;
354 obj->nr_programs = nr_progs + 1;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000355 prog.obj = obj;
Wang Nana5b8bd42015-07-01 02:14:00 +0000356 progs[nr_progs] = prog;
357 return 0;
358}
359
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700360static int
361bpf_object__init_prog_names(struct bpf_object *obj)
362{
363 Elf_Data *symbols = obj->efile.symbols;
364 struct bpf_program *prog;
365 size_t pi, si;
366
367 for (pi = 0; pi < obj->nr_programs; pi++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800368 const char *name = NULL;
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700369
370 prog = &obj->programs[pi];
371
372 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
373 si++) {
374 GElf_Sym sym;
375
376 if (!gelf_getsym(symbols, si, &sym))
377 continue;
378 if (sym.st_shndx != prog->idx)
379 continue;
Roman Gushchinfe4d44b2017-12-13 15:18:52 +0000380 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
381 continue;
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700382
383 name = elf_strptr(obj->efile.elf,
384 obj->efile.strtabidx,
385 sym.st_name);
386 if (!name) {
387 pr_warning("failed to get sym name string for prog %s\n",
388 prog->section_name);
389 return -LIBBPF_ERRNO__LIBELF;
390 }
391 }
392
Jakub Kicinski9a94f272018-06-28 14:41:38 -0700393 if (!name && prog->idx == obj->efile.text_shndx)
394 name = ".text";
395
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700396 if (!name) {
397 pr_warning("failed to find sym for prog %s\n",
398 prog->section_name);
399 return -EINVAL;
400 }
Jakub Kicinski9a94f272018-06-28 14:41:38 -0700401
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700402 prog->name = strdup(name);
403 if (!prog->name) {
404 pr_warning("failed to allocate memory for prog sym %s\n",
405 name);
406 return -ENOMEM;
407 }
408 }
409
410 return 0;
411}
412
Wang Nan6c956392015-07-01 02:13:54 +0000413static struct bpf_object *bpf_object__new(const char *path,
414 void *obj_buf,
415 size_t obj_buf_sz)
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000416{
417 struct bpf_object *obj;
418
419 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
420 if (!obj) {
421 pr_warning("alloc memory failed for %s\n", path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000422 return ERR_PTR(-ENOMEM);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000423 }
424
425 strcpy(obj->path, path);
426 obj->efile.fd = -1;
Wang Nan6c956392015-07-01 02:13:54 +0000427
428 /*
429 * Caller of this function should also calls
430 * bpf_object__elf_finish() after data collection to return
431 * obj_buf to user. If not, we should duplicate the buffer to
432 * avoid user freeing them before elf finish.
433 */
434 obj->efile.obj_buf = obj_buf;
435 obj->efile.obj_buf_sz = obj_buf_sz;
Wang Nan666810e2016-01-25 09:55:49 +0000436 obj->efile.maps_shndx = -1;
Wang Nan6c956392015-07-01 02:13:54 +0000437
Wang Nan52d33522015-07-01 02:14:04 +0000438 obj->loaded = false;
Wang Nan9a208ef2015-07-01 02:14:10 +0000439
440 INIT_LIST_HEAD(&obj->list);
441 list_add(&obj->list, &bpf_objects_list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000442 return obj;
443}
444
445static void bpf_object__elf_finish(struct bpf_object *obj)
446{
447 if (!obj_elf_valid(obj))
448 return;
449
450 if (obj->efile.elf) {
451 elf_end(obj->efile.elf);
452 obj->efile.elf = NULL;
453 }
Wang Nanbec7d682015-07-01 02:13:59 +0000454 obj->efile.symbols = NULL;
Wang Nanb62f06e2015-07-01 02:14:01 +0000455
456 zfree(&obj->efile.reloc);
457 obj->efile.nr_reloc = 0;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000458 zclose(obj->efile.fd);
Wang Nan6c956392015-07-01 02:13:54 +0000459 obj->efile.obj_buf = NULL;
460 obj->efile.obj_buf_sz = 0;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000461}
462
463static int bpf_object__elf_init(struct bpf_object *obj)
464{
465 int err = 0;
466 GElf_Ehdr *ep;
467
468 if (obj_elf_valid(obj)) {
469 pr_warning("elf init: internal error\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000470 return -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000471 }
472
Wang Nan6c956392015-07-01 02:13:54 +0000473 if (obj->efile.obj_buf_sz > 0) {
474 /*
475 * obj_buf should have been validated by
476 * bpf_object__open_buffer().
477 */
478 obj->efile.elf = elf_memory(obj->efile.obj_buf,
479 obj->efile.obj_buf_sz);
480 } else {
481 obj->efile.fd = open(obj->path, O_RDONLY);
482 if (obj->efile.fd < 0) {
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200483 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700484 char *cp = libbpf_strerror_r(errno, errmsg,
485 sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200486
487 pr_warning("failed to open %s: %s\n", obj->path, cp);
Wang Nan6c956392015-07-01 02:13:54 +0000488 return -errno;
489 }
490
491 obj->efile.elf = elf_begin(obj->efile.fd,
492 LIBBPF_ELF_C_READ_MMAP,
493 NULL);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000494 }
495
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000496 if (!obj->efile.elf) {
497 pr_warning("failed to open %s as ELF file\n",
498 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000499 err = -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000500 goto errout;
501 }
502
503 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
504 pr_warning("failed to get EHDR from %s\n",
505 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000506 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000507 goto errout;
508 }
509 ep = &obj->efile.ehdr;
510
Wang Nan9b161372016-07-18 06:01:08 +0000511 /* Old LLVM set e_machine to EM_NONE */
512 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000513 pr_warning("%s is not an eBPF object file\n",
514 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000515 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000516 goto errout;
517 }
518
519 return 0;
520errout:
521 bpf_object__elf_finish(obj);
522 return err;
523}
524
Wang Nancc4228d2015-07-01 02:13:55 +0000525static int
526bpf_object__check_endianness(struct bpf_object *obj)
527{
528 static unsigned int const endian = 1;
529
530 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
531 case ELFDATA2LSB:
532 /* We are big endian, BPF obj is little endian. */
533 if (*(unsigned char const *)&endian != 1)
534 goto mismatch;
535 break;
536
537 case ELFDATA2MSB:
538 /* We are little endian, BPF obj is big endian. */
539 if (*(unsigned char const *)&endian != 0)
540 goto mismatch;
541 break;
542 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000543 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000544 }
545
546 return 0;
547
548mismatch:
549 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000550 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000551}
552
Wang Nancb1e5e92015-07-01 02:13:57 +0000553static int
554bpf_object__init_license(struct bpf_object *obj,
555 void *data, size_t size)
556{
557 memcpy(obj->license, data,
558 min(size, sizeof(obj->license) - 1));
559 pr_debug("license of %s is %s\n", obj->path, obj->license);
560 return 0;
561}
562
563static int
564bpf_object__init_kversion(struct bpf_object *obj,
565 void *data, size_t size)
566{
Yonghong Song438363c2018-10-09 16:14:47 -0700567 __u32 kver;
Wang Nancb1e5e92015-07-01 02:13:57 +0000568
569 if (size != sizeof(kver)) {
570 pr_warning("invalid kver section in %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000571 return -LIBBPF_ERRNO__FORMAT;
Wang Nancb1e5e92015-07-01 02:13:57 +0000572 }
573 memcpy(&kver, data, sizeof(kver));
574 obj->kern_version = kver;
575 pr_debug("kernel version of %s is %x\n", obj->path,
576 obj->kern_version);
577 return 0;
578}
579
Eric Leblond4708bbd2016-11-15 04:05:47 +0000580static int compare_bpf_map(const void *_a, const void *_b)
581{
582 const struct bpf_map *a = _a;
583 const struct bpf_map *b = _b;
584
585 return a->offset - b->offset;
586}
587
588static int
John Fastabendc034a172018-10-15 11:19:55 -0700589bpf_object__init_maps(struct bpf_object *obj, int flags)
Eric Leblond4708bbd2016-11-15 04:05:47 +0000590{
John Fastabendc034a172018-10-15 11:19:55 -0700591 bool strict = !(flags & MAPS_RELAX_COMPAT);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400592 int i, map_idx, map_def_sz, nr_maps = 0;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000593 Elf_Scn *scn;
594 Elf_Data *data;
595 Elf_Data *symbols = obj->efile.symbols;
596
597 if (obj->efile.maps_shndx < 0)
598 return -EINVAL;
599 if (!symbols)
600 return -EINVAL;
601
602 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
603 if (scn)
604 data = elf_getdata(scn, NULL);
605 if (!scn || !data) {
606 pr_warning("failed to get Elf_Data from map section %d\n",
607 obj->efile.maps_shndx);
608 return -EINVAL;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000609 }
610
Eric Leblond4708bbd2016-11-15 04:05:47 +0000611 /*
612 * Count number of maps. Each map has a name.
613 * Array of maps is not supported: only the first element is
614 * considered.
615 *
616 * TODO: Detect array of map and report error.
617 */
618 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
619 GElf_Sym sym;
620
621 if (!gelf_getsym(symbols, i, &sym))
622 continue;
623 if (sym.st_shndx != obj->efile.maps_shndx)
624 continue;
625 nr_maps++;
626 }
627
628 /* Alloc obj->maps and fill nr_maps. */
629 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
630 nr_maps, data->d_size);
631
632 if (!nr_maps)
633 return 0;
Wang Nan9d759a92015-11-27 08:47:35 +0000634
Craig Gallekb13c5c12017-10-05 10:41:57 -0400635 /* Assume equally sized map definitions */
636 map_def_sz = data->d_size / nr_maps;
637 if (!data->d_size || (data->d_size % nr_maps) != 0) {
638 pr_warning("unable to determine map definition size "
639 "section %s, %d maps in %zd bytes\n",
640 obj->path, nr_maps, data->d_size);
641 return -EINVAL;
642 }
643
Wang Nan9d759a92015-11-27 08:47:35 +0000644 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
645 if (!obj->maps) {
646 pr_warning("alloc maps for object failed\n");
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000647 return -ENOMEM;
648 }
Wang Nan9d759a92015-11-27 08:47:35 +0000649 obj->nr_maps = nr_maps;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000650
Eric Leblond4708bbd2016-11-15 04:05:47 +0000651 /*
652 * fill all fd with -1 so won't close incorrect
653 * fd (fd=0 is stdin) when failure (zclose won't close
654 * negative fd)).
655 */
656 for (i = 0; i < nr_maps; i++)
Wang Nan9d759a92015-11-27 08:47:35 +0000657 obj->maps[i].fd = -1;
658
Eric Leblond4708bbd2016-11-15 04:05:47 +0000659 /*
660 * Fill obj->maps using data in "maps" section.
661 */
662 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +0000663 GElf_Sym sym;
Wang Nan561bbcc2015-11-27 08:47:36 +0000664 const char *map_name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000665 struct bpf_map_def *def;
Wang Nan561bbcc2015-11-27 08:47:36 +0000666
667 if (!gelf_getsym(symbols, i, &sym))
668 continue;
Wang Nan666810e2016-01-25 09:55:49 +0000669 if (sym.st_shndx != obj->efile.maps_shndx)
Wang Nan561bbcc2015-11-27 08:47:36 +0000670 continue;
671
672 map_name = elf_strptr(obj->efile.elf,
Wang Nan77ba9a52015-12-08 02:25:30 +0000673 obj->efile.strtabidx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000674 sym.st_name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000675 obj->maps[map_idx].offset = sym.st_value;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400676 if (sym.st_value + map_def_sz > data->d_size) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000677 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
678 obj->path, map_name);
679 return -EINVAL;
Wang Nan561bbcc2015-11-27 08:47:36 +0000680 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000681
Wang Nan561bbcc2015-11-27 08:47:36 +0000682 obj->maps[map_idx].name = strdup(map_name);
Wang Nan973170e2015-12-08 02:25:29 +0000683 if (!obj->maps[map_idx].name) {
684 pr_warning("failed to alloc map name\n");
685 return -ENOMEM;
686 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000687 pr_debug("map %d is \"%s\"\n", map_idx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000688 obj->maps[map_idx].name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000689 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400690 /*
691 * If the definition of the map in the object file fits in
692 * bpf_map_def, copy it. Any extra fields in our version
693 * of bpf_map_def will default to zero as a result of the
694 * calloc above.
695 */
696 if (map_def_sz <= sizeof(struct bpf_map_def)) {
697 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
698 } else {
699 /*
700 * Here the map structure being read is bigger than what
701 * we expect, truncate if the excess bits are all zero.
702 * If they are not zero, reject this map as
703 * incompatible.
704 */
705 char *b;
706 for (b = ((char *)def) + sizeof(struct bpf_map_def);
707 b < ((char *)def) + map_def_sz; b++) {
708 if (*b != 0) {
709 pr_warning("maps section in %s: \"%s\" "
710 "has unrecognized, non-zero "
711 "options\n",
712 obj->path, map_name);
John Fastabendc034a172018-10-15 11:19:55 -0700713 if (strict)
714 return -EINVAL;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400715 }
716 }
717 memcpy(&obj->maps[map_idx].def, def,
718 sizeof(struct bpf_map_def));
719 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000720 map_idx++;
Wang Nan561bbcc2015-11-27 08:47:36 +0000721 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000722
723 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400724 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +0000725}
726
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100727static bool section_have_execinstr(struct bpf_object *obj, int idx)
728{
729 Elf_Scn *scn;
730 GElf_Shdr sh;
731
732 scn = elf_getscn(obj->efile.elf, idx);
733 if (!scn)
734 return false;
735
736 if (gelf_getshdr(scn, &sh) != &sh)
737 return false;
738
739 if (sh.sh_flags & SHF_EXECINSTR)
740 return true;
741
742 return false;
743}
744
John Fastabendc034a172018-10-15 11:19:55 -0700745static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
Wang Nan29603662015-07-01 02:13:56 +0000746{
747 Elf *elf = obj->efile.elf;
748 GElf_Ehdr *ep = &obj->efile.ehdr;
749 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000750 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000751
752 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
753 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
754 pr_warning("failed to get e_shstrndx from %s\n",
755 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000756 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000757 }
758
759 while ((scn = elf_nextscn(elf, scn)) != NULL) {
760 char *name;
761 GElf_Shdr sh;
762 Elf_Data *data;
763
764 idx++;
765 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100766 pr_warning("failed to get section(%d) header from %s\n",
767 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000768 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000769 goto out;
770 }
771
772 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
773 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100774 pr_warning("failed to get section(%d) name from %s\n",
775 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000776 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000777 goto out;
778 }
779
780 data = elf_getdata(scn, 0);
781 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100782 pr_warning("failed to get section(%d) data from %s(%s)\n",
783 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000784 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000785 goto out;
786 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100787 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
788 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000789 (int)sh.sh_link, (unsigned long)sh.sh_flags,
790 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000791
792 if (strcmp(name, "license") == 0)
793 err = bpf_object__init_license(obj,
794 data->d_buf,
795 data->d_size);
796 else if (strcmp(name, "version") == 0)
797 err = bpf_object__init_kversion(obj,
798 data->d_buf,
799 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000800 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000801 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700802 else if (strcmp(name, BTF_ELF_SEC) == 0) {
803 obj->btf = btf__new(data->d_buf, data->d_size,
804 __pr_debug);
805 if (IS_ERR(obj->btf)) {
806 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
807 BTF_ELF_SEC, PTR_ERR(obj->btf));
808 obj->btf = NULL;
809 }
810 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000811 if (obj->efile.symbols) {
812 pr_warning("bpf: multiple SYMTAB in %s\n",
813 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000814 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000815 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000816 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000817 obj->efile.strtabidx = sh.sh_link;
818 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000819 } else if ((sh.sh_type == SHT_PROGBITS) &&
820 (sh.sh_flags & SHF_EXECINSTR) &&
821 (data->d_size > 0)) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800822 if (strcmp(name, ".text") == 0)
823 obj->efile.text_shndx = idx;
Wang Nana5b8bd42015-07-01 02:14:00 +0000824 err = bpf_object__add_program(obj, data->d_buf,
825 data->d_size, name, idx);
826 if (err) {
Wang Nan6371ca3b2015-11-06 13:49:37 +0000827 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700828 char *cp = libbpf_strerror_r(-err, errmsg,
829 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000830
Wang Nana5b8bd42015-07-01 02:14:00 +0000831 pr_warning("failed to alloc program %s (%s): %s",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200832 name, obj->path, cp);
Wang Nana5b8bd42015-07-01 02:14:00 +0000833 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000834 } else if (sh.sh_type == SHT_REL) {
835 void *reloc = obj->efile.reloc;
836 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100837 int sec = sh.sh_info; /* points to other section */
838
839 /* Only do relo for section with exec instructions */
840 if (!section_have_execinstr(obj, sec)) {
841 pr_debug("skip relo %s(%d) for section(%d)\n",
842 name, idx, sec);
843 continue;
844 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000845
Jakub Kicinski531b0142018-07-10 14:43:05 -0700846 reloc = reallocarray(reloc, nr_reloc,
847 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +0000848 if (!reloc) {
849 pr_warning("realloc failed\n");
850 err = -ENOMEM;
851 } else {
852 int n = nr_reloc - 1;
853
854 obj->efile.reloc = reloc;
855 obj->efile.nr_reloc = nr_reloc;
856
857 obj->efile.reloc[n].shdr = sh;
858 obj->efile.reloc[n].data = data;
859 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100860 } else {
861 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000862 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000863 if (err)
864 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000865 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000866
Wang Nan77ba9a52015-12-08 02:25:30 +0000867 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
868 pr_warning("Corrupted ELF file: index of strtab invalid\n");
869 return LIBBPF_ERRNO__FORMAT;
870 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700871 if (obj->efile.maps_shndx >= 0) {
John Fastabendc034a172018-10-15 11:19:55 -0700872 err = bpf_object__init_maps(obj, flags);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700873 if (err)
874 goto out;
875 }
876 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000877out:
878 return err;
879}
880
Wang Nan34090912015-07-01 02:14:02 +0000881static struct bpf_program *
882bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
883{
884 struct bpf_program *prog;
885 size_t i;
886
887 for (i = 0; i < obj->nr_programs; i++) {
888 prog = &obj->programs[i];
889 if (prog->idx == idx)
890 return prog;
891 }
892 return NULL;
893}
894
/*
 * Find a program by its ELF section name (@title).  Walks programs via
 * the public bpf_object__for_each_program() iterator, so only programs
 * that iterator yields are searchable here.  Returns NULL on no match.
 */
struct bpf_program *
bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}
906
/*
 * Parse one SHT_REL section belonging to @prog and record one reloc_desc
 * per entry.  Two relocation kinds are recognized:
 *  - RELO_CALL: the symbol lives in .text -> bpf-to-bpf call, resolved
 *    later by bpf_program__reloc_text();
 *  - RELO_LD64: the symbol lives in the maps section -> a BPF_LD_IMM64
 *    that gets a map fd patched in at load time.
 * Relocations against any other section are rejected.
 * Returns 0 on success, -LIBBPF_ERRNO__* / -ENOMEM on failure.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* resolve the symbol this relocation refers to */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* only the maps section and .text are valid targets */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* r_offset is a byte offset into the instruction stream */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			/* call into .text: remember the callee's offset */
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* a map reference must sit on a BPF_LD_IMM64 instruction */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* map symbols are matched by their offset in the maps section */
		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
1000
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001001static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1002{
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001003 const struct btf_type *container_type;
1004 const struct btf_member *key, *value;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001005 struct bpf_map_def *def = &map->def;
1006 const size_t max_name = 256;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001007 char container_name[max_name];
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07001008 __s64 key_size, value_size;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001009 __s32 container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001010
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001011 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1012 max_name) {
1013 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001014 map->name, map->name);
1015 return -EINVAL;
1016 }
1017
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001018 container_id = btf__find_by_name(btf, container_name);
1019 if (container_id < 0) {
1020 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1021 map->name, container_name);
1022 return container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001023 }
1024
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001025 container_type = btf__type_by_id(btf, container_id);
1026 if (!container_type) {
1027 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1028 map->name, container_id);
1029 return -EINVAL;
1030 }
1031
1032 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1033 BTF_INFO_VLEN(container_type->info) < 2) {
1034 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1035 map->name, container_name);
1036 return -EINVAL;
1037 }
1038
1039 key = (struct btf_member *)(container_type + 1);
1040 value = key + 1;
1041
1042 key_size = btf__resolve_size(btf, key->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001043 if (key_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001044 pr_warning("map:%s invalid BTF key_type_size\n",
1045 map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001046 return key_size;
1047 }
1048
1049 if (def->key_size != key_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001050 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1051 map->name, (__u32)key_size, def->key_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001052 return -EINVAL;
1053 }
1054
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001055 value_size = btf__resolve_size(btf, value->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001056 if (value_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001057 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001058 return value_size;
1059 }
1060
1061 if (def->value_size != value_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001062 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1063 map->name, (__u32)value_size, def->value_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001064 return -EINVAL;
1065 }
1066
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001067 map->btf_key_type_id = key->type;
1068 map->btf_value_type_id = value->type;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001069
1070 return 0;
1071}
1072
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001073int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1074{
1075 struct bpf_map_info info = {};
1076 __u32 len = sizeof(info);
1077 int new_fd, err;
1078 char *new_name;
1079
1080 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1081 if (err)
1082 return err;
1083
1084 new_name = strdup(info.name);
1085 if (!new_name)
1086 return -errno;
1087
1088 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1089 if (new_fd < 0)
1090 goto err_free_new_name;
1091
1092 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1093 if (new_fd < 0)
1094 goto err_close_new_fd;
1095
1096 err = zclose(map->fd);
1097 if (err)
1098 goto err_close_new_fd;
1099 free(map->name);
1100
1101 map->fd = new_fd;
1102 map->name = new_name;
1103 map->def.type = info.type;
1104 map->def.key_size = info.key_size;
1105 map->def.value_size = info.value_size;
1106 map->def.max_entries = info.max_entries;
1107 map->def.map_flags = info.map_flags;
1108 map->btf_key_type_id = info.btf_key_type_id;
1109 map->btf_value_type_id = info.btf_value_type_id;
1110
1111 return 0;
1112
1113err_close_new_fd:
1114 close(new_fd);
1115err_free_new_name:
1116 free(new_name);
1117 return -errno;
1118}
1119
Wang Nan52d33522015-07-01 02:14:04 +00001120static int
1121bpf_object__create_maps(struct bpf_object *obj)
1122{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001123 struct bpf_create_map_attr create_attr = {};
Wang Nan52d33522015-07-01 02:14:04 +00001124 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001125 int err;
Wang Nan52d33522015-07-01 02:14:04 +00001126
Wang Nan9d759a92015-11-27 08:47:35 +00001127 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001128 struct bpf_map *map = &obj->maps[i];
1129 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001130 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001131 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00001132
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001133 if (map->fd >= 0) {
1134 pr_debug("skip map create (preset) %s: fd=%d\n",
1135 map->name, map->fd);
1136 continue;
1137 }
1138
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001139 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07001140 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001141 create_attr.map_type = def->type;
1142 create_attr.map_flags = def->map_flags;
1143 create_attr.key_size = def->key_size;
1144 create_attr.value_size = def->value_size;
1145 create_attr.max_entries = def->max_entries;
1146 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001147 create_attr.btf_key_type_id = 0;
1148 create_attr.btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001149
1150 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1151 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001152 create_attr.btf_key_type_id = map->btf_key_type_id;
1153 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001154 }
1155
1156 *pfd = bpf_create_map_xattr(&create_attr);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001157 if (*pfd < 0 && create_attr.btf_key_type_id) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001158 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001159 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001160 map->name, cp, errno);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001161 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001162 create_attr.btf_key_type_id = 0;
1163 create_attr.btf_value_type_id = 0;
1164 map->btf_key_type_id = 0;
1165 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001166 *pfd = bpf_create_map_xattr(&create_attr);
1167 }
1168
Wang Nan52d33522015-07-01 02:14:04 +00001169 if (*pfd < 0) {
1170 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00001171
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001172 err = *pfd;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001173 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Eric Leblond49bf4b32017-08-20 21:48:14 +02001174 pr_warning("failed to create map (name: '%s'): %s\n",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001175 map->name, cp);
Wang Nan52d33522015-07-01 02:14:04 +00001176 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00001177 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001178 return err;
1179 }
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001180 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00001181 }
1182
Wang Nan52d33522015-07-01 02:14:04 +00001183 return 0;
1184}
1185
Wang Nan8a47a6c2015-07-01 02:14:05 +00001186static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001187bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1188 struct reloc_desc *relo)
1189{
1190 struct bpf_insn *insn, *new_insn;
1191 struct bpf_program *text;
1192 size_t new_cnt;
1193
1194 if (relo->type != RELO_CALL)
1195 return -LIBBPF_ERRNO__RELOC;
1196
1197 if (prog->idx == obj->efile.text_shndx) {
1198 pr_warning("relo in .text insn %d into off %d\n",
1199 relo->insn_idx, relo->text_off);
1200 return -LIBBPF_ERRNO__RELOC;
1201 }
1202
1203 if (prog->main_prog_cnt == 0) {
1204 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1205 if (!text) {
1206 pr_warning("no .text section found yet relo into text exist\n");
1207 return -LIBBPF_ERRNO__RELOC;
1208 }
1209 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07001210 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001211 if (!new_insn) {
1212 pr_warning("oom in prog realloc\n");
1213 return -ENOMEM;
1214 }
1215 memcpy(new_insn + prog->insns_cnt, text->insns,
1216 text->insns_cnt * sizeof(*insn));
1217 prog->insns = new_insn;
1218 prog->main_prog_cnt = prog->insns_cnt;
1219 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00001220 pr_debug("added %zd insn from %s to prog %s\n",
1221 text->insns_cnt, text->section_name,
1222 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001223 }
1224 insn = &prog->insns[relo->insn_idx];
1225 insn->imm += prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001226 return 0;
1227}
1228
1229static int
Wang Nan9d759a92015-11-27 08:47:35 +00001230bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00001231{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001232 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001233
1234 if (!prog || !prog->reloc_desc)
1235 return 0;
1236
1237 for (i = 0; i < prog->nr_reloc; i++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001238 if (prog->reloc_desc[i].type == RELO_LD64) {
1239 struct bpf_insn *insns = prog->insns;
1240 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001241
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001242 insn_idx = prog->reloc_desc[i].insn_idx;
1243 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001244
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001245 if (insn_idx >= (int)prog->insns_cnt) {
1246 pr_warning("relocation out of range: '%s'\n",
1247 prog->section_name);
1248 return -LIBBPF_ERRNO__RELOC;
1249 }
1250 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1251 insns[insn_idx].imm = obj->maps[map_idx].fd;
1252 } else {
1253 err = bpf_program__reloc_text(prog, obj,
1254 &prog->reloc_desc[i]);
1255 if (err)
1256 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001257 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00001258 }
1259
1260 zfree(&prog->reloc_desc);
1261 prog->nr_reloc = 0;
1262 return 0;
1263}
1264
1265
1266static int
1267bpf_object__relocate(struct bpf_object *obj)
1268{
1269 struct bpf_program *prog;
1270 size_t i;
1271 int err;
1272
1273 for (i = 0; i < obj->nr_programs; i++) {
1274 prog = &obj->programs[i];
1275
Wang Nan9d759a92015-11-27 08:47:35 +00001276 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001277 if (err) {
1278 pr_warning("failed to relocate '%s'\n",
1279 prog->section_name);
1280 return err;
1281 }
1282 }
1283 return 0;
1284}
1285
/*
 * Walk all SHT_REL sections recorded during ELF parsing and hand each to
 * the program it applies to (sh_info names the target section index).
 * Must run while the ELF handle is still open.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;	/* section the relocs apply to */
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
1320
/*
 * Load one BPF program image via bpf_load_program_xattr().  On success,
 * *pfd receives the new program fd and 0 is returned.  On failure a
 * -LIBBPF_ERRNO__* code is returned, chosen by inspecting the verifier
 * log and by a probe reload: a failed load is retried once as
 * BPF_PROG_TYPE_KPROBE; if that succeeds, the original program type was
 * wrong (-LIBBPF_ERRNO__PROGTYPE), otherwise a bad kern_version is
 * assumed (-LIBBPF_ERRNO__KVER).
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* the log buffer is optional: load proceeds without it on OOM */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* non-empty log: the kernel verifier rejected the program */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* probe: retry as a kprobe without a log buffer */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1392
/*
 * Load all instances of @prog into the kernel.  Without a preprocessor a
 * single instance is loaded from prog->insns.  With a preprocessor set,
 * it is invoked once per instance and may rewrite the instructions or
 * skip an instance (fd recorded as -1).  On return the program's
 * instruction array is freed regardless of outcome, so a program cannot
 * be loaded twice.  Returns 0 on success, negative error otherwise.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* nr < 0 or a missing fds array means instances are uninitialized */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		/* default to exactly one (not yet loaded) instance */
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn but proceed, loading only instance 0 */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* preprocessor may skip an instance by returning no insns */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are consumed whether or not the load succeeded */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1475
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001476static bool bpf_program__is_function_storage(struct bpf_program *prog,
1477 struct bpf_object *obj)
1478{
1479 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1480}
1481
Wang Nan55cffde2015-07-01 02:14:07 +00001482static int
1483bpf_object__load_progs(struct bpf_object *obj)
1484{
1485 size_t i;
1486 int err;
1487
1488 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001489 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001490 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001491 err = bpf_program__load(&obj->programs[i],
1492 obj->license,
1493 obj->kern_version);
1494 if (err)
1495 return err;
1496 }
1497 return 0;
1498}
1499
/*
 * Whether a program type requires attr.kern_version at load time.  The
 * listed networking/cgroup types do not; tracing types and anything
 * unrecognized default to requiring it.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	bool needs_kver;

	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		needs_kver = false;
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		needs_kver = true;
		break;
	}

	return needs_kver;
}
1531
1532static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1533{
1534 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001535 pr_warning("%s doesn't provide kernel version\n",
1536 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001537 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001538 }
1539 return 0;
1540}
1541
/*
 * Core open path shared by all bpf_object__open*() flavors: build an
 * object from @path or from the (@obj_buf, @obj_buf_sz) memory image,
 * parse its ELF sections and relocations, and validate it.  @needs_kver
 * enforces a non-zero kernel version; @flags is forwarded to section
 * collection.  Returns the new object or an ERR_PTR()-encoded error.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR() jumps to 'out' with err set on failure */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is no longer needed once everything is collected */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1570
John Fastabendc034a172018-10-15 11:19:55 -07001571struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1572 int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001573{
1574 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001575 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001576 return NULL;
1577
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001578 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001579
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001580 return __bpf_object__open(attr->file, NULL, 0,
John Fastabendc034a172018-10-15 11:19:55 -07001581 bpf_prog_type__needs_kver(attr->prog_type),
1582 flags);
1583}
1584
/* Open with attributes and no extra section-parsing flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1589
1590struct bpf_object *bpf_object__open(const char *path)
1591{
1592 struct bpf_object_open_attr attr = {
1593 .file = path,
1594 .prog_type = BPF_PROG_TYPE_UNSPEC,
1595 };
1596
1597 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00001598}
1599
/*
 * Open a BPF object from an in-memory ELF image instead of a file.
 * @name is used for messages and object identity; when NULL, a name is
 * synthesized from the buffer address and size.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/*
	 * NOTE(review): the last argument is __bpf_object__open()'s int
	 * 'flags', yet 'true' (1) is passed here -- presumably to enable
	 * relaxed map parsing for buffer-based callers, but confirm this
	 * is intentional and not a copy/paste of the needs_kver argument.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1622
Wang Nan52d33522015-07-01 02:14:04 +00001623int bpf_object__unload(struct bpf_object *obj)
1624{
1625 size_t i;
1626
1627 if (!obj)
1628 return -EINVAL;
1629
Wang Nan9d759a92015-11-27 08:47:35 +00001630 for (i = 0; i < obj->nr_maps; i++)
1631 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001632
Wang Nan55cffde2015-07-01 02:14:07 +00001633 for (i = 0; i < obj->nr_programs; i++)
1634 bpf_program__unload(&obj->programs[i]);
1635
Wang Nan52d33522015-07-01 02:14:04 +00001636 return 0;
1637}
1638
/*
 * Load an opened object into the kernel: create all maps, apply
 * relocations, then load every program.  On any failure the object is
 * unloaded again so no partial kernel state is left behind.
 * Returns 0 on success, negative error code otherwise.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	/* Loading twice would leak the resources of the first load. */
	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* CHECK_ERR() stores a negative result in 'err' and jumps to
	 * 'out' on failure.  Order matters: maps must exist before
	 * relocation can patch their fds into program instructions.
	 */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1663
Joe Stringerf3675402017-01-26 13:19:56 -08001664static int check_path(const char *path)
1665{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001666 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001667 struct statfs st_fs;
1668 char *dname, *dir;
1669 int err = 0;
1670
1671 if (path == NULL)
1672 return -EINVAL;
1673
1674 dname = strdup(path);
1675 if (dname == NULL)
1676 return -ENOMEM;
1677
1678 dir = dirname(dname);
1679 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001680 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001681 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001682 err = -errno;
1683 }
1684 free(dname);
1685
1686 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1687 pr_warning("specified path %s is not on BPF FS\n", path);
1688 err = -EINVAL;
1689 }
1690
1691 return err;
1692}
1693
1694int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1695 int instance)
1696{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001697 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001698 int err;
1699
1700 err = check_path(path);
1701 if (err)
1702 return err;
1703
1704 if (prog == NULL) {
1705 pr_warning("invalid program pointer\n");
1706 return -EINVAL;
1707 }
1708
1709 if (instance < 0 || instance >= prog->instances.nr) {
1710 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1711 instance, prog->section_name, prog->instances.nr);
1712 return -EINVAL;
1713 }
1714
1715 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001716 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001717 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001718 return -errno;
1719 }
1720 pr_debug("pinned program '%s'\n", path);
1721
1722 return 0;
1723}
1724
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001725int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1726 int instance)
1727{
1728 int err;
1729
1730 err = check_path(path);
1731 if (err)
1732 return err;
1733
1734 if (prog == NULL) {
1735 pr_warning("invalid program pointer\n");
1736 return -EINVAL;
1737 }
1738
1739 if (instance < 0 || instance >= prog->instances.nr) {
1740 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1741 instance, prog->section_name, prog->instances.nr);
1742 return -EINVAL;
1743 }
1744
1745 err = unlink(path);
1746 if (err != 0)
1747 return -errno;
1748 pr_debug("unpinned program '%s'\n", path);
1749
1750 return 0;
1751}
1752
Joe Stringerf3675402017-01-26 13:19:56 -08001753static int make_dir(const char *path)
1754{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001755 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001756 int err = 0;
1757
1758 if (mkdir(path, 0700) && errno != EEXIST)
1759 err = -errno;
1760
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001761 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001762 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001763 pr_warning("failed to mkdir %s: %s\n", path, cp);
1764 }
Joe Stringerf3675402017-01-26 13:19:56 -08001765 return err;
1766}
1767
/*
 * Pin all instances of @prog.  A single-instance program is pinned
 * directly at @path; multiple instances are pinned as @path/0,
 * @path/1, ... inside a newly created directory.  On failure,
 * instances pinned so far are unpinned and the directory is removed,
 * leaving no partial state behind.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* Roll back in reverse order; 'i' still indexes the entry that
	 * failed, so start from the one before it.
	 */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
1834
1835int bpf_program__unpin(struct bpf_program *prog, const char *path)
1836{
1837 int i, err;
1838
1839 err = check_path(path);
1840 if (err)
1841 return err;
1842
1843 if (prog == NULL) {
1844 pr_warning("invalid program pointer\n");
1845 return -EINVAL;
1846 }
1847
1848 if (prog->instances.nr <= 0) {
1849 pr_warning("no instances of prog %s to pin\n",
1850 prog->section_name);
1851 return -EINVAL;
1852 }
1853
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08001854 if (prog->instances.nr == 1) {
1855 /* don't create subdirs when pinning single instance */
1856 return bpf_program__unpin_instance(prog, path, 0);
1857 }
1858
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001859 for (i = 0; i < prog->instances.nr; i++) {
1860 char buf[PATH_MAX];
1861 int len;
1862
1863 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08001864 if (len < 0)
1865 return -EINVAL;
1866 else if (len >= PATH_MAX)
1867 return -ENAMETOOLONG;
1868
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001869 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08001870 if (err)
1871 return err;
1872 }
1873
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001874 err = rmdir(path);
1875 if (err)
1876 return -errno;
1877
Joe Stringerf3675402017-01-26 13:19:56 -08001878 return 0;
1879}
1880
Joe Stringerb6989f32017-01-26 13:19:57 -08001881int bpf_map__pin(struct bpf_map *map, const char *path)
1882{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001883 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08001884 int err;
1885
1886 err = check_path(path);
1887 if (err)
1888 return err;
1889
1890 if (map == NULL) {
1891 pr_warning("invalid map pointer\n");
1892 return -EINVAL;
1893 }
1894
1895 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001896 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001897 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08001898 return -errno;
1899 }
1900
1901 pr_debug("pinned map '%s'\n", path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001902
Joe Stringerb6989f32017-01-26 13:19:57 -08001903 return 0;
1904}
1905
/* Remove the pin of @map by unlinking @path. */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int ret;

	ret = check_path(path);
	if (ret)
		return ret;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path))
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
1926
/*
 * Pin all maps of @obj as <path>/<map name>, creating @path if needed.
 * The object must already be loaded, since pinning needs live map fds.
 * On failure, maps pinned so far are unpinned again.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* 'map' still points at the entry that failed; walk backwards
	 * from there, unpinning everything pinned so far.
	 */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
1982
1983int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
1984{
1985 struct bpf_map *map;
1986 int err;
1987
1988 if (!obj)
1989 return -ENOENT;
1990
1991 bpf_map__for_each(map, obj) {
1992 char buf[PATH_MAX];
1993 int len;
1994
1995 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1996 bpf_map__name(map));
Joe Stringerd5148d82017-01-26 13:19:58 -08001997 if (len < 0)
1998 return -EINVAL;
1999 else if (len >= PATH_MAX)
2000 return -ENAMETOOLONG;
2001
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002002 err = bpf_map__unpin(map, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002003 if (err)
2004 return err;
2005 }
2006
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002007 return 0;
2008}
2009
/*
 * Pin all programs of @obj as <path>/<pin name>, creating @path if
 * needed.  The object must already be loaded.  On failure, programs
 * pinned so far are unpinned again.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* 'prog' still points at the entry that failed; walk backwards
	 * from there, unpinning everything pinned so far.
	 */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2065
2066int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2067{
2068 struct bpf_program *prog;
2069 int err;
2070
2071 if (!obj)
2072 return -ENOENT;
2073
Joe Stringerd5148d82017-01-26 13:19:58 -08002074 bpf_object__for_each_program(prog, obj) {
2075 char buf[PATH_MAX];
2076 int len;
2077
2078 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08002079 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08002080 if (len < 0)
2081 return -EINVAL;
2082 else if (len >= PATH_MAX)
2083 return -ENAMETOOLONG;
2084
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002085 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002086 if (err)
2087 return err;
2088 }
2089
2090 return 0;
2091}
2092
/* Pin maps first; if pinning programs then fails, roll the maps back. */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int ret;

	ret = bpf_object__pin_maps(obj, path);
	if (ret)
		return ret;

	ret = bpf_object__pin_programs(obj, path);
	if (ret) {
		bpf_object__unpin_maps(obj, path);
		return ret;
	}

	return 0;
}
2109
/*
 * Tear down @obj completely: run the user's clear_priv callback,
 * release ELF state, unload kernel resources, free BTF, maps and
 * programs, unlink from the global object list and free the object
 * itself.  Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* Per-map private data has its own destructor callback. */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Remove from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002144
Wang Nan9a208ef2015-07-01 02:14:10 +00002145struct bpf_object *
2146bpf_object__next(struct bpf_object *prev)
2147{
2148 struct bpf_object *next;
2149
2150 if (!prev)
2151 next = list_first_entry(&bpf_objects_list,
2152 struct bpf_object,
2153 list);
2154 else
2155 next = list_next_entry(prev, list);
2156
2157 /* Empty list is noticed here so don't need checking on entry. */
2158 if (&next->list == &bpf_objects_list)
2159 return NULL;
2160
2161 return next;
2162}
2163
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002164const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00002165{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002166 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00002167}
2168
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002169unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00002170{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002171 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00002172}
2173
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002174int bpf_object__btf_fd(const struct bpf_object *obj)
2175{
2176 return obj->btf ? btf__fd(obj->btf) : -1;
2177}
2178
Wang Nan10931d22016-11-26 07:03:26 +00002179int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2180 bpf_object_clear_priv_t clear_priv)
2181{
2182 if (obj->priv && obj->clear_priv)
2183 obj->clear_priv(obj, obj->priv);
2184
2185 obj->priv = priv;
2186 obj->clear_priv = clear_priv;
2187 return 0;
2188}
2189
2190void *bpf_object__priv(struct bpf_object *obj)
2191{
2192 return obj ? obj->priv : ERR_PTR(-EINVAL);
2193}
2194
/*
 * Step from program @p to the entry @i positions away (i may be
 * negative) within @obj's programs array; returns NULL when the step
 * would leave the array.  @p must point into obj->programs.
 */
static struct bpf_program *
__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, int i)
{
	ssize_t idx;

	if (!obj->programs)
		return NULL;

	if (p->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	/* NOTE(review): idx is signed; if nr_programs is unsigned
	 * (size_t), a negative idx would also trip the first comparison
	 * after conversion, so the bounds check is correct either way —
	 * confirm nr_programs' type against the struct definition.
	 */
	idx = (p - obj->programs) + i;
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}
2213
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002214struct bpf_program *
2215bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2216{
2217 struct bpf_program *prog = prev;
2218
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002219 if (prev == NULL)
2220 return obj->programs;
2221
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002222 do {
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002223 prog = __bpf_program__iter(prog, obj, 1);
2224 } while (prog && bpf_program__is_function_storage(prog, obj));
2225
2226 return prog;
2227}
2228
2229struct bpf_program *
2230bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2231{
2232 struct bpf_program *prog = next;
2233
2234 if (next == NULL) {
2235 if (!obj->nr_programs)
2236 return NULL;
2237 return obj->programs + obj->nr_programs - 1;
2238 }
2239
2240 do {
2241 prog = __bpf_program__iter(prog, obj, -1);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002242 } while (prog && bpf_program__is_function_storage(prog, obj));
2243
2244 return prog;
2245}
2246
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002247int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2248 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002249{
2250 if (prog->priv && prog->clear_priv)
2251 prog->clear_priv(prog, prog->priv);
2252
2253 prog->priv = priv;
2254 prog->clear_priv = clear_priv;
2255 return 0;
2256}
2257
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002258void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002259{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002260 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002261}
2262
Jakub Kicinski9aba3612018-06-28 14:41:37 -07002263void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
2264{
2265 prog->prog_ifindex = ifindex;
2266}
2267
Namhyung Kim715f8db2015-11-03 20:21:05 +09002268const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002269{
2270 const char *title;
2271
2272 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09002273 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002274 title = strdup(title);
2275 if (!title) {
2276 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00002277 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002278 }
2279 }
2280
2281 return title;
2282}
2283
int bpf_program__fd(struct bpf_program *prog)
{
	/* A program's fd is defined as the fd of its first instance. */
	return bpf_program__nth_fd(prog, 0);
}
2288
2289int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2290 bpf_program_prep_t prep)
2291{
2292 int *instances_fds;
2293
2294 if (nr_instances <= 0 || !prep)
2295 return -EINVAL;
2296
2297 if (prog->instances.nr > 0 || prog->instances.fds) {
2298 pr_warning("Can't set pre-processor after loading\n");
2299 return -EINVAL;
2300 }
2301
2302 instances_fds = malloc(sizeof(int) * nr_instances);
2303 if (!instances_fds) {
2304 pr_warning("alloc memory failed for fds\n");
2305 return -ENOMEM;
2306 }
2307
2308 /* fill all fd with -1 */
2309 memset(instances_fds, -1, sizeof(int) * nr_instances);
2310
2311 prog->instances.nr = nr_instances;
2312 prog->instances.fds = instances_fds;
2313 prog->preprocessor = prep;
2314 return 0;
2315}
2316
2317int bpf_program__nth_fd(struct bpf_program *prog, int n)
2318{
2319 int fd;
2320
Jakub Kicinski1e960042018-07-26 14:32:18 -07002321 if (!prog)
2322 return -EINVAL;
2323
Wang Nanb5805632015-11-16 12:10:09 +00002324 if (n >= prog->instances.nr || n < 0) {
2325 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2326 n, prog->section_name, prog->instances.nr);
2327 return -EINVAL;
2328 }
2329
2330 fd = prog->instances.fds[n];
2331 if (fd < 0) {
2332 pr_warning("%dth instance of program '%s' is invalid\n",
2333 n, prog->section_name);
2334 return -ENOENT;
2335 }
2336
2337 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002338}
Wang Nan9d759a92015-11-27 08:47:35 +00002339
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07002340void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00002341{
2342 prog->type = type;
2343}
2344
Wang Nan5f44e4c82016-07-13 10:44:01 +00002345static bool bpf_program__is_type(struct bpf_program *prog,
2346 enum bpf_prog_type type)
2347{
2348 return prog ? (prog->type == type) : false;
2349}
2350
/*
 * Generate the bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * accessor pair for each well-known program type instantiated below.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002373
John Fastabend16962b22018-04-23 14:30:38 -07002374void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2375 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002376{
2377 prog->expected_attach_type = type;
2378}
2379
/*
 * Helpers for building section_names[] entries; all expand to the same
 * initializer { sec, len, prog_type, expected_attach_type,
 * is_attachable, attach_type } and are #undef'd right after the table.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/*
 * Table mapping an ELF section-name *prefix* to the corresponding
 * program type, load-time expected attach type and (when attachable)
 * attach type.  Consumed by libbpf_prog_type_by_name() and
 * libbpf_attach_type_by_name(), which match by prefix — e.g.
 * "kprobe/sys_write" matches the "kprobe/" entry.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
		      BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
		      BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
		      BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
		      BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
		      BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
		      BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002465
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002466int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2467 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00002468{
2469 int i;
2470
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002471 if (!name)
2472 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00002473
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002474 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2475 if (strncmp(name, section_names[i].sec, section_names[i].len))
2476 continue;
2477 *prog_type = section_names[i].prog_type;
2478 *expected_attach_type = section_names[i].expected_attach_type;
2479 return 0;
2480 }
2481 return -EINVAL;
2482}
Roman Gushchin583c9002017-12-13 15:18:51 +00002483
Andrey Ignatov956b6202018-09-26 15:24:53 -07002484int libbpf_attach_type_by_name(const char *name,
2485 enum bpf_attach_type *attach_type)
2486{
2487 int i;
2488
2489 if (!name)
2490 return -EINVAL;
2491
2492 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2493 if (strncmp(name, section_names[i].sec, section_names[i].len))
2494 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07002495 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07002496 return -EINVAL;
2497 *attach_type = section_names[i].attach_type;
2498 return 0;
2499 }
2500 return -EINVAL;
2501}
2502
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002503static int
2504bpf_program__identify_section(struct bpf_program *prog,
2505 enum bpf_prog_type *prog_type,
2506 enum bpf_attach_type *expected_attach_type)
2507{
2508 return libbpf_prog_type_by_name(prog->section_name, prog_type,
2509 expected_attach_type);
Roman Gushchin583c9002017-12-13 15:18:51 +00002510}
2511
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002512int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002513{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002514 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002515}
2516
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002517const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002518{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002519 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002520}
2521
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002522const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002523{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002524 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002525}
2526
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002527__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002528{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002529 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002530}
2531
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002532__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002533{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002534 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002535}
2536
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002537int bpf_map__set_priv(struct bpf_map *map, void *priv,
2538 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002539{
2540 if (!map)
2541 return -EINVAL;
2542
2543 if (map->priv) {
2544 if (map->clear_priv)
2545 map->clear_priv(map, map->priv);
2546 }
2547
2548 map->priv = priv;
2549 map->clear_priv = clear_priv;
2550 return 0;
2551}
2552
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002553void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002554{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002555 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002556}
2557
Jakub Kicinskif83fb222018-07-10 14:43:01 -07002558bool bpf_map__is_offload_neutral(struct bpf_map *map)
2559{
2560 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2561}
2562
/* Record the network interface index @map should be associated with
 * (used when offloading the map to a device).
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2567
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002568static struct bpf_map *
2569__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00002570{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002571 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00002572 struct bpf_map *s, *e;
2573
2574 if (!obj || !obj->maps)
2575 return NULL;
2576
2577 s = obj->maps;
2578 e = obj->maps + obj->nr_maps;
2579
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002580 if ((m < s) || (m >= e)) {
Wang Nan9d759a92015-11-27 08:47:35 +00002581 pr_warning("error in %s: map handler doesn't belong to object\n",
2582 __func__);
2583 return NULL;
2584 }
2585
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002586 idx = (m - obj->maps) + i;
2587 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00002588 return NULL;
2589 return &obj->maps[idx];
2590}
Wang Nan561bbcc2015-11-27 08:47:36 +00002591
2592struct bpf_map *
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002593bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2594{
2595 if (prev == NULL)
2596 return obj->maps;
2597
2598 return __bpf_map__iter(prev, obj, 1);
2599}
2600
2601struct bpf_map *
2602bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2603{
2604 if (next == NULL) {
2605 if (!obj->nr_maps)
2606 return NULL;
2607 return obj->maps + obj->nr_maps - 1;
2608 }
2609
2610 return __bpf_map__iter(next, obj, -1);
2611}
2612
2613struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002614bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002615{
2616 struct bpf_map *pos;
2617
2618 bpf_map__for_each(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002619 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002620 return pos;
2621 }
2622 return NULL;
2623}
Wang Nan5a6acad2016-11-26 07:03:27 +00002624
2625struct bpf_map *
2626bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2627{
2628 int i;
2629
2630 for (i = 0; i < obj->nr_maps; i++) {
2631 if (obj->maps[i].offset == offset)
2632 return &obj->maps[i];
2633 }
2634 return ERR_PTR(-ENOENT);
2635}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002636
/*
 * Translate a pointer-encoded return value into an error code: the
 * negative errno for an ERR_PTR()-style pointer, 0 for a valid pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002643
2644int bpf_prog_load(const char *file, enum bpf_prog_type type,
2645 struct bpf_object **pobj, int *prog_fd)
2646{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002647 struct bpf_prog_load_attr attr;
2648
2649 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2650 attr.file = file;
2651 attr.prog_type = type;
2652 attr.expected_attach_type = 0;
2653
2654 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2655}
2656
2657int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2658 struct bpf_object **pobj, int *prog_fd)
2659{
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002660 struct bpf_object_open_attr open_attr = {
2661 .file = attr->file,
2662 .prog_type = attr->prog_type,
2663 };
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002664 struct bpf_program *prog, *first_prog = NULL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002665 enum bpf_attach_type expected_attach_type;
2666 enum bpf_prog_type prog_type;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002667 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07002668 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002669 int err;
2670
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002671 if (!attr)
2672 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07002673 if (!attr->file)
2674 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002675
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002676 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07002677 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07002678 return -ENOENT;
2679
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002680 bpf_object__for_each_program(prog, obj) {
2681 /*
2682 * If type is not specified, try to guess it based on
2683 * section name.
2684 */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002685 prog_type = attr->prog_type;
David Beckettf0307a72018-05-16 14:02:49 -07002686 prog->prog_ifindex = attr->ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002687 expected_attach_type = attr->expected_attach_type;
2688 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002689 err = bpf_program__identify_section(prog, &prog_type,
2690 &expected_attach_type);
2691 if (err < 0) {
2692 pr_warning("failed to guess program type based on section name %s\n",
2693 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002694 bpf_object__close(obj);
2695 return -EINVAL;
2696 }
2697 }
2698
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002699 bpf_program__set_type(prog, prog_type);
2700 bpf_program__set_expected_attach_type(prog,
2701 expected_attach_type);
2702
Taeung Song69495d22018-09-03 08:30:07 +09002703 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002704 first_prog = prog;
2705 }
2706
David Beckettf0307a72018-05-16 14:02:49 -07002707 bpf_map__for_each(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07002708 if (!bpf_map__is_offload_neutral(map))
2709 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07002710 }
2711
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002712 if (!first_prog) {
2713 pr_warning("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07002714 bpf_object__close(obj);
2715 return -ENOENT;
2716 }
2717
John Fastabend6f6d33f2017-08-15 22:34:22 -07002718 err = bpf_object__load(obj);
2719 if (err) {
2720 bpf_object__close(obj);
2721 return -EINVAL;
2722 }
2723
2724 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002725 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07002726 return 0;
2727}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002728
/*
 * Drain all pending records from a perf event mmap ring buffer.
 *
 * @mmap_mem: start of the mmap'ed region (metadata page + data pages)
 * @mmap_size: size of the data portion (must be a power of two, since
 *             offsets are masked with mmap_size - 1)
 * @page_size: size of the leading metadata page; data starts after it
 * @copy_mem/@copy_size: caller-owned scratch buffer, grown on demand to
 *             linearize records that wrap around the ring's end; caller
 *             frees *copy_mem
 * @fn: callback invoked for each record; returns LIBBPF_PERF_EVENT_CONT
 *      to keep consuming, anything else stops the loop
 * @private_data: opaque pointer passed through to @fn
 *
 * Returns the last callback verdict, or LIBBPF_PERF_EVENT_ERROR on
 * allocation failure.
 *
 * NOTE(review): ring_buffer_read_head()/ring_buffer_write_tail() come
 * from linux/ring_buffer.h and are presumably the acquire/release
 * accessors for the kernel's head/tail protocol -- the statement order
 * here (read head first, publish tail last) depends on that; confirm
 * against that header before reordering anything.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	/* Data pages follow the metadata page. */
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	/* Consume records until we catch up with the producer's head. */
	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: linearize it
		 * into *copy_mem before handing it to the callback.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			/* Grow the scratch buffer if this record doesn't fit. */
			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			/* Tail piece at the ring's end, head piece at its start. */
			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new tail so the kernel can reuse the consumed space. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}