blob: 4ea3368bf8037acda8607f2c8799d6ea24f2535b [file] [log] [blame]
Alexei Starovoitov1bc38b82018-10-05 16:40:00 -07001// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
Eric Leblond6061a3d2018-01-30 21:55:03 +01002
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan1b76c132015-07-01 02:13:51 +000010 */
11
Yonghong Songb4269952018-11-29 15:31:45 -080012#ifndef _GNU_SOURCE
Jakub Kicinski531b0142018-07-10 14:43:05 -070013#define _GNU_SOURCE
Yonghong Songb4269952018-11-29 15:31:45 -080014#endif
Wang Nan1b76c132015-07-01 02:13:51 +000015#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000016#include <stdio.h>
17#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080018#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000019#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000020#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000021#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000022#include <fcntl.h>
23#include <errno.h>
Wang Nan1b76c132015-07-01 02:13:51 +000024#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080025#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000026#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000027#include <linux/bpf.h>
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -070028#include <linux/btf.h>
Stanislav Fomichev47eff612018-11-20 17:11:19 -080029#include <linux/filter.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000030#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080031#include <linux/limits.h>
Yonghong Song438363c2018-10-09 16:14:47 -070032#include <linux/perf_event.h>
Daniel Borkmanna64af0e2018-10-19 15:51:03 +020033#include <linux/ring_buffer.h>
Joe Stringerf3675402017-01-26 13:19:56 -080034#include <sys/stat.h>
35#include <sys/types.h>
36#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070037#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000038#include <libelf.h>
39#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000040
41#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000042#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070043#include "btf.h"
Arnaldo Carvalho de Melo6d419072018-09-14 16:47:14 -030044#include "str_error.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000045
/* ELF e_machine value for eBPF; fallback for toolchains whose elf.h
 * does not define it yet.
 */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* bpffs superblock magic; fallback for old kernel headers.  Presumably
 * compared against statfs() f_type elsewhere in this file (sys/vfs.h is
 * included above) — confirm at the use site.
 */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif
53
#define __printf(a, b) __attribute__((format(printf, a, b)))

/* Default print callback: forward the formatted message to stderr and
 * return vfprintf()'s result (chars written, or negative on error).
 */
__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	int ret;
	va_list ap;

	va_start(ap, format);
	ret = vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}
67
/* Per-level print callbacks: warning and info default to stderr via
 * __base_pr; debug defaults to NULL (silent) until enabled through
 * libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Emit a message through @func if it is non-NULL, with a "libbpf: "
 * prefix spliced onto the format string at compile time.
 */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
81
/* Install user-supplied print callbacks for the warning, info and
 * debug levels.  Passing NULL for a level silences it (see __pr()).
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000090
/* Size for on-stack strerror-style message buffers. */
#define STRERR_BUFSIZE  128

/* Run @action, store its result in @err, jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* free() the pointee and NULL the pointer, preventing double-free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* close() @fd only if valid (>= 0), then reset it to -1; evaluates to
 * close()'s return value, or 0 when fd was already invalid.
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
119
/* Kernel feature flags shared by all programs of a bpf_object (each
 * bpf_program::caps points at its object's copy; presumably probed at
 * load time elsewhere in this file — confirm at the probe site).
 */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};
124
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;		/* owned; symbol name (see bpf_object__init_prog_names) */
	int prog_ifindex;	/* presumably target netdev for offload — TODO confirm */
	char *section_name;	/* owned copy of the ELF section name */
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;	/* owned copy of the section's instructions */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One record per instruction needing patching at load time. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* patches in map_idx */
			RELO_CALL,	/* patches in text_off */
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded instances; nr == -1 means "never loaded" (see
	 * bpf_program__unload()).
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;	/* back-pointer to the owning object */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;		/* -1 when no BTF fd is associated */
	void *func_info;	/* owned; freed in bpf_program__unload() */
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;	/* points at owning object's caps */
};
174
/* One map, as declared by a symbol in the object's "maps" section. */
struct bpf_map {
	int fd;			/* -1 until the map is created */
	char *name;		/* owned copy of the map symbol name */
	size_t offset;		/* symbol st_value within the maps section */
	int map_ifindex;	/* presumably for device offload — TODO confirm */
	int inner_map_fd;	/* -1 unless map-in-map (cf. bpf_map_type__is_map_in_map) */
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
187
/* Global list linking every live bpf_object (via bpf_object::list). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];	/* NUL-terminated (object is calloc'd) */
	__u32 kern_version;	/* from the "version" section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory image, if any */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SHT_SYMTAB section data */
		size_t strtabidx;	/* symtab's sh_link: string table index */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* "maps" section index, -1 if absent */
		int text_shndx;		/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;	/* parsed .BTF section, NULL when absent/bad */
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];		/* flexible array holding the object path */
};
/* True once ELF parsing state has been set up (and not yet finished). */
#define obj_elf_valid(o)	((o)->efile.elf)
240
/* Close every loaded instance of @prog and release per-load resources
 * (instance fds, BTF fd, func_info).  Safe on NULL and on a program
 * that was never loaded; leaves @prog ready to be loaded again.
 */
void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		/* Any value other than -1 or a positive count indicates
		 * internal state corruption.
		 */
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zclose(prog->btf_fd);
	zfree(&prog->func_info);
}
266
Wang Nana5b8bd42015-07-01 02:14:00 +0000267static void bpf_program__exit(struct bpf_program *prog)
268{
269 if (!prog)
270 return;
271
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000272 if (prog->clear_priv)
273 prog->clear_priv(prog, prog->priv);
274
275 prog->priv = NULL;
276 prog->clear_priv = NULL;
277
Wang Nan55cffde2015-07-01 02:14:07 +0000278 bpf_program__unload(prog);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700279 zfree(&prog->name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000280 zfree(&prog->section_name);
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800281 zfree(&prog->pin_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000282 zfree(&prog->insns);
Wang Nan34090912015-07-01 02:14:02 +0000283 zfree(&prog->reloc_desc);
284
285 prog->nr_reloc = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +0000286 prog->insns_cnt = 0;
287 prog->idx = -1;
288}
289
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800290static char *__bpf_program__pin_name(struct bpf_program *prog)
291{
292 char *name, *p;
293
294 name = p = strdup(prog->section_name);
295 while ((p = strchr(p, '/')))
296 *p = '_';
297
298 return name;
299}
300
/* Initialize @prog from raw section bytes: duplicate the names, copy
 * the instruction array and set load-state defaults.  @size must hold
 * at least one struct bpf_insn.  Returns 0 or a negative errno; on
 * failure the partially built program is torn down.
 */
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	/* Reads prog->section_name, so must follow the strdup above. */
	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	/* Trailing bytes smaller than one insn are silently dropped. */
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;	/* "never loaded" marker */
	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->btf_fd = -1;

	return 0;
errout:
	/* Only allocation failures reach here, hence -ENOMEM. */
	bpf_program__exit(prog);
	return -ENOMEM;
}
346
/* Parse one executable section into a bpf_program and append it to
 * obj->programs (array grown with reallocarray).  Returns 0 or a
 * negative errno.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* Struct copy: ownership of prog's heap buffers moves into the array. */
	progs[nr_progs] = prog;
	return 0;
}
382
/* Resolve each program's name from the ELF symbol table: the first
 * STB_GLOBAL symbol found in the program's section.  Programs living
 * in .text fall back to the name ".text".  Returns 0 or a negative
 * libbpf/errno code.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan symbols until one matches this program's section. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
435
/* Allocate and minimally initialize a bpf_object for @path.  The path
 * is stored in the flexible trailing array; the calloc() also zeroes
 * license/kern_version/etc.  The new object is linked onto the global
 * bpf_objects_list.  Returns the object, or an ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;	/* so zclose() won't close fd 0 on failure */

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;	/* "no maps section seen yet" */

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
467
/* Release all ELF-parsing state of @obj: end the libelf handle, close
 * the file descriptor, free the collected reloc array and forget (not
 * free) the caller-owned obj_buf.  No-op when ELF state was never set
 * up.
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	/* symbols came from elf_getdata(); owned by libelf, just forget it */
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
485
486static int bpf_object__elf_init(struct bpf_object *obj)
487{
488 int err = 0;
489 GElf_Ehdr *ep;
490
491 if (obj_elf_valid(obj)) {
492 pr_warning("elf init: internal error\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000493 return -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000494 }
495
Wang Nan6c956392015-07-01 02:13:54 +0000496 if (obj->efile.obj_buf_sz > 0) {
497 /*
498 * obj_buf should have been validated by
499 * bpf_object__open_buffer().
500 */
501 obj->efile.elf = elf_memory(obj->efile.obj_buf,
502 obj->efile.obj_buf_sz);
503 } else {
504 obj->efile.fd = open(obj->path, O_RDONLY);
505 if (obj->efile.fd < 0) {
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200506 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700507 char *cp = libbpf_strerror_r(errno, errmsg,
508 sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200509
510 pr_warning("failed to open %s: %s\n", obj->path, cp);
Wang Nan6c956392015-07-01 02:13:54 +0000511 return -errno;
512 }
513
514 obj->efile.elf = elf_begin(obj->efile.fd,
515 LIBBPF_ELF_C_READ_MMAP,
516 NULL);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000517 }
518
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000519 if (!obj->efile.elf) {
520 pr_warning("failed to open %s as ELF file\n",
521 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000522 err = -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000523 goto errout;
524 }
525
526 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
527 pr_warning("failed to get EHDR from %s\n",
528 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000529 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000530 goto errout;
531 }
532 ep = &obj->efile.ehdr;
533
Wang Nan9b161372016-07-18 06:01:08 +0000534 /* Old LLVM set e_machine to EM_NONE */
535 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000536 pr_warning("%s is not an eBPF object file\n",
537 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000538 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000539 goto errout;
540 }
541
542 return 0;
543errout:
544 bpf_object__elf_finish(obj);
545 return err;
546}
547
Wang Nancc4228d2015-07-01 02:13:55 +0000548static int
549bpf_object__check_endianness(struct bpf_object *obj)
550{
551 static unsigned int const endian = 1;
552
553 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
554 case ELFDATA2LSB:
555 /* We are big endian, BPF obj is little endian. */
556 if (*(unsigned char const *)&endian != 1)
557 goto mismatch;
558 break;
559
560 case ELFDATA2MSB:
561 /* We are little endian, BPF obj is big endian. */
562 if (*(unsigned char const *)&endian != 0)
563 goto mismatch;
564 break;
565 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000566 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000567 }
568
569 return 0;
570
571mismatch:
572 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000573 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000574}
575
Wang Nancb1e5e92015-07-01 02:13:57 +0000576static int
577bpf_object__init_license(struct bpf_object *obj,
578 void *data, size_t size)
579{
580 memcpy(obj->license, data,
581 min(size, sizeof(obj->license) - 1));
582 pr_debug("license of %s is %s\n", obj->path, obj->license);
583 return 0;
584}
585
586static int
587bpf_object__init_kversion(struct bpf_object *obj,
588 void *data, size_t size)
589{
Yonghong Song438363c2018-10-09 16:14:47 -0700590 __u32 kver;
Wang Nancb1e5e92015-07-01 02:13:57 +0000591
592 if (size != sizeof(kver)) {
593 pr_warning("invalid kver section in %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000594 return -LIBBPF_ERRNO__FORMAT;
Wang Nancb1e5e92015-07-01 02:13:57 +0000595 }
596 memcpy(&kver, data, sizeof(kver));
597 obj->kern_version = kver;
598 pr_debug("kernel version of %s is %x\n", obj->path,
599 obj->kern_version);
600 return 0;
601}
602
Eric Leblond4708bbd2016-11-15 04:05:47 +0000603static int compare_bpf_map(const void *_a, const void *_b)
604{
605 const struct bpf_map *a = _a;
606 const struct bpf_map *b = _b;
607
608 return a->offset - b->offset;
609}
610
/* True for map-in-map types, whose entries are themselves map fds
 * (cf. bpf_map::inner_map_fd).
 */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
618
/* Populate obj->maps from the "maps" ELF section: count the map
 * symbols, infer a uniform per-map definition size, copy each
 * bpf_map_def (tolerating larger-than-known definitions whose extra
 * bytes are zero, or unconditionally when MAPS_RELAX_COMPAT is set in
 * @flags), then sort the maps by section offset.  Returns 0 or a
 * negative errno.
 */
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	/* short-circuit: data is only read when scn is non-NULL */
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	/* calloc so unknown trailing bpf_map_def fields default to 0 */
	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		/*
		 * fill all fd with -1 so won't close incorrect
		 * fd (fd=0 is stdin) when failure (zclose won't close
		 * negative fd)).
		 */
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		/* NOTE(review): map_name is not checked for NULL before the
		 * pr_warning/strdup uses below; elf_strptr can fail.
		 */
		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}
759
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100760static bool section_have_execinstr(struct bpf_object *obj, int idx)
761{
762 Elf_Scn *scn;
763 GElf_Shdr sh;
764
765 scn = elf_getscn(obj->efile.elf, idx);
766 if (!scn)
767 return false;
768
769 if (gelf_getshdr(scn, &sh) != &sh)
770 return false;
771
772 if (sh.sh_flags & SHF_EXECINSTR)
773 return true;
774
775 return false;
776}
777
John Fastabendc034a172018-10-15 11:19:55 -0700778static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
Wang Nan29603662015-07-01 02:13:56 +0000779{
780 Elf *elf = obj->efile.elf;
781 GElf_Ehdr *ep = &obj->efile.ehdr;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800782 Elf_Data *btf_ext_data = NULL;
Wang Nan29603662015-07-01 02:13:56 +0000783 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000784 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000785
786 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
787 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
788 pr_warning("failed to get e_shstrndx from %s\n",
789 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000790 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000791 }
792
793 while ((scn = elf_nextscn(elf, scn)) != NULL) {
794 char *name;
795 GElf_Shdr sh;
796 Elf_Data *data;
797
798 idx++;
799 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100800 pr_warning("failed to get section(%d) header from %s\n",
801 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000802 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000803 goto out;
804 }
805
806 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
807 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100808 pr_warning("failed to get section(%d) name from %s\n",
809 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000810 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000811 goto out;
812 }
813
814 data = elf_getdata(scn, 0);
815 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100816 pr_warning("failed to get section(%d) data from %s(%s)\n",
817 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000818 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000819 goto out;
820 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100821 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
822 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000823 (int)sh.sh_link, (unsigned long)sh.sh_flags,
824 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000825
826 if (strcmp(name, "license") == 0)
827 err = bpf_object__init_license(obj,
828 data->d_buf,
829 data->d_size);
830 else if (strcmp(name, "version") == 0)
831 err = bpf_object__init_kversion(obj,
832 data->d_buf,
833 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000834 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000835 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700836 else if (strcmp(name, BTF_ELF_SEC) == 0) {
837 obj->btf = btf__new(data->d_buf, data->d_size,
838 __pr_debug);
839 if (IS_ERR(obj->btf)) {
840 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
841 BTF_ELF_SEC, PTR_ERR(obj->btf));
842 obj->btf = NULL;
843 }
Yonghong Song2993e052018-11-19 15:29:16 -0800844 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800845 btf_ext_data = data;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700846 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000847 if (obj->efile.symbols) {
848 pr_warning("bpf: multiple SYMTAB in %s\n",
849 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000850 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000851 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000852 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000853 obj->efile.strtabidx = sh.sh_link;
854 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000855 } else if ((sh.sh_type == SHT_PROGBITS) &&
856 (sh.sh_flags & SHF_EXECINSTR) &&
857 (data->d_size > 0)) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800858 if (strcmp(name, ".text") == 0)
859 obj->efile.text_shndx = idx;
Wang Nana5b8bd42015-07-01 02:14:00 +0000860 err = bpf_object__add_program(obj, data->d_buf,
861 data->d_size, name, idx);
862 if (err) {
Wang Nan6371ca3b2015-11-06 13:49:37 +0000863 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700864 char *cp = libbpf_strerror_r(-err, errmsg,
865 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000866
Wang Nana5b8bd42015-07-01 02:14:00 +0000867 pr_warning("failed to alloc program %s (%s): %s",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200868 name, obj->path, cp);
Wang Nana5b8bd42015-07-01 02:14:00 +0000869 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000870 } else if (sh.sh_type == SHT_REL) {
871 void *reloc = obj->efile.reloc;
872 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100873 int sec = sh.sh_info; /* points to other section */
874
875 /* Only do relo for section with exec instructions */
876 if (!section_have_execinstr(obj, sec)) {
877 pr_debug("skip relo %s(%d) for section(%d)\n",
878 name, idx, sec);
879 continue;
880 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000881
Jakub Kicinski531b0142018-07-10 14:43:05 -0700882 reloc = reallocarray(reloc, nr_reloc,
883 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +0000884 if (!reloc) {
885 pr_warning("realloc failed\n");
886 err = -ENOMEM;
887 } else {
888 int n = nr_reloc - 1;
889
890 obj->efile.reloc = reloc;
891 obj->efile.nr_reloc = nr_reloc;
892
893 obj->efile.reloc[n].shdr = sh;
894 obj->efile.reloc[n].data = data;
895 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100896 } else {
897 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000898 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000899 if (err)
900 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000901 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000902
Wang Nan77ba9a52015-12-08 02:25:30 +0000903 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
904 pr_warning("Corrupted ELF file: index of strtab invalid\n");
905 return LIBBPF_ERRNO__FORMAT;
906 }
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800907 if (btf_ext_data) {
908 if (!obj->btf) {
909 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
910 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
911 } else {
912 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
913 btf_ext_data->d_size,
914 __pr_debug);
915 if (IS_ERR(obj->btf_ext)) {
916 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
917 BTF_EXT_ELF_SEC,
918 PTR_ERR(obj->btf_ext));
919 obj->btf_ext = NULL;
920 }
921 }
922 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700923 if (obj->efile.maps_shndx >= 0) {
John Fastabendc034a172018-10-15 11:19:55 -0700924 err = bpf_object__init_maps(obj, flags);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700925 if (err)
926 goto out;
927 }
928 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000929out:
930 return err;
931}
932
Wang Nan34090912015-07-01 02:14:02 +0000933static struct bpf_program *
934bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
935{
936 struct bpf_program *prog;
937 size_t i;
938
939 for (i = 0; i < obj->nr_programs; i++) {
940 prog = &obj->programs[i];
941 if (prog->idx == idx)
942 return prog;
943 }
944 return NULL;
945}
946
Jakub Kicinski6d4b1982018-07-26 14:32:19 -0700947struct bpf_program *
948bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
949{
950 struct bpf_program *pos;
951
952 bpf_object__for_each_program(pos, obj) {
953 if (pos->section_name && !strcmp(pos->section_name, title))
954 return pos;
955 }
956 return NULL;
957}
958
/*
 * Parse one SHT_REL section belonging to @prog and record one reloc_desc
 * entry per relocation.  Two relocation kinds are recognized:
 *
 *  - RELO_CALL: the patched insn is BPF_JMP|BPF_CALL with
 *    src_reg == BPF_PSEUDO_CALL, i.e. a bpf-to-bpf call whose target lives
 *    in the .text section; the symbol value (offset into .text) is saved
 *    as text_off.
 *  - RELO_LD64: the patched insn is BPF_LD|BPF_IMM|BPF_DW referencing the
 *    "maps" section; the map is identified by matching the symbol's value
 *    (offset within the maps section) against obj->maps[].offset.
 *
 * Only the descriptors are collected here; the instructions are actually
 * patched later in bpf_program__relocate().  Returns 0 or a negative
 * LIBBPF_ERRNO__* / -ENOMEM code.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	/* Number of fixed-size relocation records in this section. */
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Resolve the symbol the relocation refers to. */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only symbols in the maps or .text sections are supported. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* r_offset is a byte offset into the program's insn array. */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			/* bpf-to-bpf call into .text */
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* map load: insn patched with the map fd at load time */
		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
1052
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001053static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1054{
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001055 const struct btf_type *container_type;
1056 const struct btf_member *key, *value;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001057 struct bpf_map_def *def = &map->def;
1058 const size_t max_name = 256;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001059 char container_name[max_name];
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07001060 __s64 key_size, value_size;
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001061 __s32 container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001062
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001063 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1064 max_name) {
1065 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001066 map->name, map->name);
1067 return -EINVAL;
1068 }
1069
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001070 container_id = btf__find_by_name(btf, container_name);
1071 if (container_id < 0) {
1072 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1073 map->name, container_name);
1074 return container_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001075 }
1076
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001077 container_type = btf__type_by_id(btf, container_id);
1078 if (!container_type) {
1079 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1080 map->name, container_id);
1081 return -EINVAL;
1082 }
1083
1084 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1085 BTF_INFO_VLEN(container_type->info) < 2) {
1086 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1087 map->name, container_name);
1088 return -EINVAL;
1089 }
1090
1091 key = (struct btf_member *)(container_type + 1);
1092 value = key + 1;
1093
1094 key_size = btf__resolve_size(btf, key->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001095 if (key_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001096 pr_warning("map:%s invalid BTF key_type_size\n",
1097 map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001098 return key_size;
1099 }
1100
1101 if (def->key_size != key_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001102 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1103 map->name, (__u32)key_size, def->key_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001104 return -EINVAL;
1105 }
1106
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001107 value_size = btf__resolve_size(btf, value->type);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001108 if (value_size < 0) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001109 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001110 return value_size;
1111 }
1112
1113 if (def->value_size != value_size) {
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001114 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1115 map->name, (__u32)value_size, def->value_size);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001116 return -EINVAL;
1117 }
1118
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -07001119 map->btf_key_type_id = key->type;
1120 map->btf_value_type_id = value->type;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001121
1122 return 0;
1123}
1124
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001125int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1126{
1127 struct bpf_map_info info = {};
1128 __u32 len = sizeof(info);
1129 int new_fd, err;
1130 char *new_name;
1131
1132 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1133 if (err)
1134 return err;
1135
1136 new_name = strdup(info.name);
1137 if (!new_name)
1138 return -errno;
1139
1140 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1141 if (new_fd < 0)
1142 goto err_free_new_name;
1143
1144 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1145 if (new_fd < 0)
1146 goto err_close_new_fd;
1147
1148 err = zclose(map->fd);
1149 if (err)
1150 goto err_close_new_fd;
1151 free(map->name);
1152
1153 map->fd = new_fd;
1154 map->name = new_name;
1155 map->def.type = info.type;
1156 map->def.key_size = info.key_size;
1157 map->def.value_size = info.value_size;
1158 map->def.max_entries = info.max_entries;
1159 map->def.map_flags = info.map_flags;
1160 map->btf_key_type_id = info.btf_key_type_id;
1161 map->btf_value_type_id = info.btf_value_type_id;
1162
1163 return 0;
1164
1165err_close_new_fd:
1166 close(new_fd);
1167err_free_new_name:
1168 free(new_name);
1169 return -errno;
1170}
1171
Wang Nan52d33522015-07-01 02:14:04 +00001172static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08001173bpf_object__probe_name(struct bpf_object *obj)
1174{
1175 struct bpf_load_program_attr attr;
1176 char *cp, errmsg[STRERR_BUFSIZE];
1177 struct bpf_insn insns[] = {
1178 BPF_MOV64_IMM(BPF_REG_0, 0),
1179 BPF_EXIT_INSN(),
1180 };
1181 int ret;
1182
1183 /* make sure basic loading works */
1184
1185 memset(&attr, 0, sizeof(attr));
1186 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1187 attr.insns = insns;
1188 attr.insns_cnt = ARRAY_SIZE(insns);
1189 attr.license = "GPL";
1190
1191 ret = bpf_load_program_xattr(&attr, NULL, 0);
1192 if (ret < 0) {
1193 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1194 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1195 __func__, cp, errno);
1196 return -errno;
1197 }
1198 close(ret);
1199
1200 /* now try the same program, but with the name */
1201
1202 attr.name = "test";
1203 ret = bpf_load_program_xattr(&attr, NULL, 0);
1204 if (ret >= 0) {
1205 obj->caps.name = 1;
1206 close(ret);
1207 }
1208
1209 return 0;
1210}
1211
/*
 * Probe kernel capabilities relevant to libbpf.  Currently the only
 * probe is whether BPF_PROG_LOAD accepts program names.
 */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	int err;

	err = bpf_object__probe_name(obj);
	return err;
}
1217
/*
 * Create a kernel map for every map in @obj that does not already have
 * one (maps preset via bpf_map__reuse_fd() are skipped).  When BTF
 * key/value info is available it is attached to the map; if the kernel
 * rejects the BTF-annotated creation, creation is retried once without
 * BTF.  On failure every map fd created so far is closed and a negative
 * error code is returned.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		/* fd already set up (e.g. via bpf_map__reuse_fd()) */
		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		/* only pass a name if the kernel supports map names */
		if (obj->caps.name)
			create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		if (bpf_map_type__is_map_in_map(def->type) &&
		    map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;

		/* attach BTF key/value type ids when they can be resolved */
		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			/* kernel may not support BTF; retry without it */
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, cp, errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name, cp);
			/* unwind: close every map created so far */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
1287
Wang Nan8a47a6c2015-07-01 02:14:05 +00001288static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001289check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1290 void *btf_prog_info, const char *info_name)
1291{
1292 if (err != -ENOENT) {
1293 pr_warning("Error in loading %s for sec %s.\n",
1294 info_name, prog->section_name);
1295 return err;
1296 }
1297
1298 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1299
1300 if (btf_prog_info) {
1301 /*
1302 * Some info has already been found but has problem
1303 * in the last btf_ext reloc. Must have to error
1304 * out.
1305 */
1306 pr_warning("Error in relocating %s for sec %s.\n",
1307 info_name, prog->section_name);
1308 return err;
1309 }
1310
1311 /*
1312 * Have problem loading the very first info. Ignore
1313 * the rest.
1314 */
1315 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1316 info_name, prog->section_name, info_name);
1317 return 0;
1318}
1319
/*
 * Pull func_info records for @prog out of the object's .BTF.ext data.
 * @insn_offset == 0 denotes the main program; a non-zero offset denotes a
 * subprogram appended at that insn offset.  For subprograms the main
 * program's func_info must already be loaded (prog->func_info != NULL),
 * which is why the body is skipped when both insn_offset is set and
 * func_info is still NULL.  Lookup errors are filtered through
 * check_btf_ext_reloc_err(), so a missing entry on the first attempt is
 * tolerated.
 */
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name, __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	/* main program also records the BTF fd used at load time */
	if (!insn_offset)
		prog->btf_fd = btf__fd(obj->btf);

	return 0;
}
1350
/*
 * Resolve one RELO_CALL relocation for @prog: a bpf-to-bpf call into the
 * .text section.  On the first such relocation the whole .text section is
 * appended to the program's insn array (recorded via main_prog_cnt), and
 * .BTF.ext func_info for the appended portion is pulled in if available.
 * The call insn's imm is then adjusted to the pc-relative distance from
 * the call site to the target inside the appended copy.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain calls relocated into .text */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}

		/* func_info for the subprog copy starts at prog->insns_cnt */
		if (obj->btf_ext) {
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/*
	 * imm already holds the target's offset within .text; adding
	 * (main_prog_cnt - insn_idx) converts it into the pc-relative
	 * distance expected by BPF_PSEUDO_CALL.
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1403
/*
 * Apply all collected relocations to @prog's instructions: RELO_LD64
 * entries are patched in place with the corresponding map fd
 * (BPF_PSEUDO_MAP_FD), RELO_CALL entries are resolved through
 * bpf_program__reloc_text().  .BTF.ext func_info for the main program is
 * loaded first.  The reloc descriptors are freed once fully applied.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		/* insn_offset 0 == main program func_info */
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	/* nothing to patch */
	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			/* ld_imm64 insn: mark as map fd load */
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
1449
1450
1451static int
1452bpf_object__relocate(struct bpf_object *obj)
1453{
1454 struct bpf_program *prog;
1455 size_t i;
1456 int err;
1457
1458 for (i = 0; i < obj->nr_programs; i++) {
1459 prog = &obj->programs[i];
1460
Wang Nan9d759a92015-11-27 08:47:35 +00001461 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001462 if (err) {
1463 pr_warning("failed to relocate '%s'\n",
1464 prog->section_name);
1465 return err;
1466 }
1467 }
1468 return 0;
1469}
1470
Wang Nan34090912015-07-01 02:14:02 +00001471static int bpf_object__collect_reloc(struct bpf_object *obj)
1472{
1473 int i, err;
1474
1475 if (!obj_elf_valid(obj)) {
1476 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001477 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001478 }
1479
1480 for (i = 0; i < obj->efile.nr_reloc; i++) {
1481 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1482 Elf_Data *data = obj->efile.reloc[i].data;
1483 int idx = shdr->sh_info;
1484 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00001485
1486 if (shdr->sh_type != SHT_REL) {
1487 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001488 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001489 }
1490
1491 prog = bpf_object__find_prog_by_idx(obj, idx);
1492 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001493 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001494 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001495 }
1496
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001497 err = bpf_program__collect_reloc(prog,
Wang Nan34090912015-07-01 02:14:02 +00001498 shdr, data,
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001499 obj);
Wang Nan34090912015-07-01 02:14:02 +00001500 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00001501 return err;
Wang Nan34090912015-07-01 02:14:02 +00001502 }
1503 return 0;
1504}
1505
/*
 * Load one program instance (@insns, @insns_cnt) into the kernel via
 * BPF_PROG_LOAD, carrying over @prog's type, attach type, name (when
 * supported), ifindex and BTF func_info.  On success *pfd receives the
 * new program fd and 0 is returned.  On failure a -LIBBPF_ERRNO__* code
 * is returned and the verifier log, if any, is dumped via pr_warning();
 * heuristics try to refine the error into VERIFY / PROG2BIG / PROGTYPE /
 * KVER.
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* only pass a name if the kernel was probed to accept one */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	/* btf_fd < 0 means no BTF; the kernel expects 0 in that case */
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* best effort: loading proceeds without a log on alloc failure */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* verifier produced output: report a verification failure */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/*
			 * Retry as a kprobe: if that loads, the original
			 * failure was most likely a wrong program type.
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1580
/*
 * Load all instances of @prog into the kernel.  Without a preprocessor
 * there is exactly one instance, loaded from prog->insns directly.  With
 * a preprocessor, each instance's instructions are produced by calling it
 * and may be skipped when it yields no instructions.  Resulting fds are
 * stored in prog->instances.fds[] (and via result.pfd when provided).
 * On return — success or failure — prog->insns is freed; the program
 * cannot be loaded a second time from this object.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* lazily set up the instance fd array for the common 1-instance case */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn but proceed using instance 0 */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		/* let the preprocessor rewrite the insns for instance i */
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* empty result means: deliberately skip this instance */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* insns are consumed by loading; drop them either way */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1659
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001660static bool bpf_program__is_function_storage(struct bpf_program *prog,
1661 struct bpf_object *obj)
1662{
1663 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1664}
1665
Wang Nan55cffde2015-07-01 02:14:07 +00001666static int
1667bpf_object__load_progs(struct bpf_object *obj)
1668{
1669 size_t i;
1670 int err;
1671
1672 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001673 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001674 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001675 err = bpf_program__load(&obj->programs[i],
1676 obj->license,
1677 obj->kern_version);
1678 if (err)
1679 return err;
1680 }
1681 return 0;
1682}
1683
/* Return true if programs of @type must provide a kernel version at
 * load time.  Every type the kernel is known NOT to check is listed
 * explicitly and returns false; unknown/new types fall through to the
 * conservative default (version required), matching kprobes.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}
1715
1716static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1717{
1718 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001719 pr_warning("%s doesn't provide kernel version\n",
1720 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001721 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001722 }
1723 return 0;
1724}
1725
/* Common open path for file- and buffer-backed objects: parse the ELF,
 * collect sections and relocations, and validate.  @needs_kver selects
 * whether a kernel version section is mandatory; @flags is forwarded to
 * bpf_object__elf_collect().  Returns the object or an ERR_PTR; on any
 * step failing the partially-built object is closed.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* Each CHECK_ERR jumps to 'out' with err set on failure;
	 * the order of these steps is significant.
	 */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is no longer needed once sections are collected. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1754
John Fastabendc034a172018-10-15 11:19:55 -07001755struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1756 int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001757{
1758 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001759 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001760 return NULL;
1761
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001762 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001763
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001764 return __bpf_object__open(attr->file, NULL, 0,
John Fastabendc034a172018-10-15 11:19:55 -07001765 bpf_prog_type__needs_kver(attr->prog_type),
1766 flags);
1767}
1768
/* Public xattr open entry point: no extra open-time flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1773
1774struct bpf_object *bpf_object__open(const char *path)
1775{
1776 struct bpf_object_open_attr attr = {
1777 .file = path,
1778 .prog_type = BPF_PROG_TYPE_UNSPEC,
1779 };
1780
1781 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00001782}
1783
/* Open an object from an in-memory ELF image.  When @name is NULL a
 * placeholder name is synthesized from the buffer address and size.
 * Returns the object, an ERR_PTR, or NULL on invalid arguments.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/* Buffer opens have no attach-type hint, so a kernel version is
	 * required (needs_kver = true).  The last argument is the
	 * elf_collect flags bitmask: pass 0, not 'true' — 'true' (0x1)
	 * would silently enable the MAPS_RELAX_COMPAT flag bit.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, 0);
}
1806
Wang Nan52d33522015-07-01 02:14:04 +00001807int bpf_object__unload(struct bpf_object *obj)
1808{
1809 size_t i;
1810
1811 if (!obj)
1812 return -EINVAL;
1813
Wang Nan9d759a92015-11-27 08:47:35 +00001814 for (i = 0; i < obj->nr_maps; i++)
1815 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001816
Wang Nan55cffde2015-07-01 02:14:07 +00001817 for (i = 0; i < obj->nr_programs; i++)
1818 bpf_program__unload(&obj->programs[i]);
1819
Wang Nan52d33522015-07-01 02:14:04 +00001820 return 0;
1821}
1822
/* Load @obj into the kernel: probe capabilities, create maps, apply
 * relocations, then load all programs.  An object may only be loaded
 * once; on any failure everything loaded so far is torn down.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* Mark loaded up front so a failed load can't be retried. */
	obj->loaded = true;

	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1848
Joe Stringerf3675402017-01-26 13:19:56 -08001849static int check_path(const char *path)
1850{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001851 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001852 struct statfs st_fs;
1853 char *dname, *dir;
1854 int err = 0;
1855
1856 if (path == NULL)
1857 return -EINVAL;
1858
1859 dname = strdup(path);
1860 if (dname == NULL)
1861 return -ENOMEM;
1862
1863 dir = dirname(dname);
1864 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001865 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001866 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001867 err = -errno;
1868 }
1869 free(dname);
1870
1871 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1872 pr_warning("specified path %s is not on BPF FS\n", path);
1873 err = -EINVAL;
1874 }
1875
1876 return err;
1877}
1878
1879int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1880 int instance)
1881{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001882 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001883 int err;
1884
1885 err = check_path(path);
1886 if (err)
1887 return err;
1888
1889 if (prog == NULL) {
1890 pr_warning("invalid program pointer\n");
1891 return -EINVAL;
1892 }
1893
1894 if (instance < 0 || instance >= prog->instances.nr) {
1895 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1896 instance, prog->section_name, prog->instances.nr);
1897 return -EINVAL;
1898 }
1899
1900 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001901 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001902 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001903 return -errno;
1904 }
1905 pr_debug("pinned program '%s'\n", path);
1906
1907 return 0;
1908}
1909
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001910int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1911 int instance)
1912{
1913 int err;
1914
1915 err = check_path(path);
1916 if (err)
1917 return err;
1918
1919 if (prog == NULL) {
1920 pr_warning("invalid program pointer\n");
1921 return -EINVAL;
1922 }
1923
1924 if (instance < 0 || instance >= prog->instances.nr) {
1925 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1926 instance, prog->section_name, prog->instances.nr);
1927 return -EINVAL;
1928 }
1929
1930 err = unlink(path);
1931 if (err != 0)
1932 return -errno;
1933 pr_debug("unpinned program '%s'\n", path);
1934
1935 return 0;
1936}
1937
Joe Stringerf3675402017-01-26 13:19:56 -08001938static int make_dir(const char *path)
1939{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001940 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001941 int err = 0;
1942
1943 if (mkdir(path, 0700) && errno != EEXIST)
1944 err = -errno;
1945
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001946 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001947 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001948 pr_warning("failed to mkdir %s: %s\n", path, cp);
1949 }
Joe Stringerf3675402017-01-26 13:19:56 -08001950 return err;
1951}
1952
/* Pin all instances of @prog.  A single-instance program is pinned
 * directly at @path; multiple instances are pinned as @path/<i> inside
 * a freshly created directory.  On failure, instances pinned so far are
 * unpinned (best effort) and the directory removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back instances [0, i): errors here are ignored since we
	 * are already on a failure path
	 */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
2019
2020int bpf_program__unpin(struct bpf_program *prog, const char *path)
2021{
2022 int i, err;
2023
2024 err = check_path(path);
2025 if (err)
2026 return err;
2027
2028 if (prog == NULL) {
2029 pr_warning("invalid program pointer\n");
2030 return -EINVAL;
2031 }
2032
2033 if (prog->instances.nr <= 0) {
2034 pr_warning("no instances of prog %s to pin\n",
2035 prog->section_name);
2036 return -EINVAL;
2037 }
2038
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08002039 if (prog->instances.nr == 1) {
2040 /* don't create subdirs when pinning single instance */
2041 return bpf_program__unpin_instance(prog, path, 0);
2042 }
2043
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002044 for (i = 0; i < prog->instances.nr; i++) {
2045 char buf[PATH_MAX];
2046 int len;
2047
2048 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08002049 if (len < 0)
2050 return -EINVAL;
2051 else if (len >= PATH_MAX)
2052 return -ENAMETOOLONG;
2053
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002054 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08002055 if (err)
2056 return err;
2057 }
2058
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002059 err = rmdir(path);
2060 if (err)
2061 return -errno;
2062
Joe Stringerf3675402017-01-26 13:19:56 -08002063 return 0;
2064}
2065
Joe Stringerb6989f32017-01-26 13:19:57 -08002066int bpf_map__pin(struct bpf_map *map, const char *path)
2067{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002068 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08002069 int err;
2070
2071 err = check_path(path);
2072 if (err)
2073 return err;
2074
2075 if (map == NULL) {
2076 pr_warning("invalid map pointer\n");
2077 return -EINVAL;
2078 }
2079
2080 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07002081 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002082 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08002083 return -errno;
2084 }
2085
2086 pr_debug("pinned map '%s'\n", path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002087
Joe Stringerb6989f32017-01-26 13:19:57 -08002088 return 0;
2089}
2090
/* Remove the pin for @map at @path.
 * Returns 0 on success, a negative error code otherwise.
 */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err = check_path(path);

	if (err)
		return err;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path) != 0)
		return -errno;

	pr_debug("unpinned map '%s'\n", path);
	return 0;
}
2111
/* Pin every map of @obj as @path/<map name>.  The object must already
 * be loaded (maps need fds).  On failure, maps pinned so far are
 * unpinned by walking backwards with bpf_map__prev() from the map that
 * failed.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* walk back from the failing map; unpin errors are ignored on
	 * this already-failing path
	 */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2167
2168int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2169{
2170 struct bpf_map *map;
2171 int err;
2172
2173 if (!obj)
2174 return -ENOENT;
2175
2176 bpf_map__for_each(map, obj) {
2177 char buf[PATH_MAX];
2178 int len;
2179
2180 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2181 bpf_map__name(map));
Joe Stringerd5148d82017-01-26 13:19:58 -08002182 if (len < 0)
2183 return -EINVAL;
2184 else if (len >= PATH_MAX)
2185 return -ENAMETOOLONG;
2186
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002187 err = bpf_map__unpin(map, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002188 if (err)
2189 return err;
2190 }
2191
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002192 return 0;
2193}
2194
/* Pin every program of @obj as @path/<pin name>.  The object must be
 * loaded.  On failure, programs pinned so far are unpinned by walking
 * backwards with bpf_program__prev() from the program that failed.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* walk back from the failing program; unpin errors are ignored
	 * on this already-failing path
	 */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2250
2251int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2252{
2253 struct bpf_program *prog;
2254 int err;
2255
2256 if (!obj)
2257 return -ENOENT;
2258
Joe Stringerd5148d82017-01-26 13:19:58 -08002259 bpf_object__for_each_program(prog, obj) {
2260 char buf[PATH_MAX];
2261 int len;
2262
2263 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08002264 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08002265 if (len < 0)
2266 return -EINVAL;
2267 else if (len >= PATH_MAX)
2268 return -ENAMETOOLONG;
2269
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002270 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002271 if (err)
2272 return err;
2273 }
2274
2275 return 0;
2276}
2277
/* Pin both maps and programs of @obj under @path.  If pinning the
 * programs fails, the already-pinned maps are rolled back.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err)
		bpf_object__unpin_maps(obj, path);

	return err;
}
2294
/* Destroy @obj: run the private-data destructor, release ELF state,
 * unload kernel resources, free BTF data, then free per-map and
 * per-program allocations, unlink from the global object list and free
 * the object itself.  Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* per-map private-data destructor, if installed */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global bpf_objects_list */
	list_del(&obj->list);
	free(obj);
}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002330
Wang Nan9a208ef2015-07-01 02:14:10 +00002331struct bpf_object *
2332bpf_object__next(struct bpf_object *prev)
2333{
2334 struct bpf_object *next;
2335
2336 if (!prev)
2337 next = list_first_entry(&bpf_objects_list,
2338 struct bpf_object,
2339 list);
2340 else
2341 next = list_next_entry(prev, list);
2342
2343 /* Empty list is noticed here so don't need checking on entry. */
2344 if (&next->list == &bpf_objects_list)
2345 return NULL;
2346
2347 return next;
2348}
2349
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002350const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00002351{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002352 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00002353}
2354
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002355unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00002356{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002357 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00002358}
2359
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002360int bpf_object__btf_fd(const struct bpf_object *obj)
2361{
2362 return obj->btf ? btf__fd(obj->btf) : -1;
2363}
2364
Wang Nan10931d22016-11-26 07:03:26 +00002365int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2366 bpf_object_clear_priv_t clear_priv)
2367{
2368 if (obj->priv && obj->clear_priv)
2369 obj->clear_priv(obj, obj->priv);
2370
2371 obj->priv = priv;
2372 obj->clear_priv = clear_priv;
2373 return 0;
2374}
2375
2376void *bpf_object__priv(struct bpf_object *obj)
2377{
2378 return obj ? obj->priv : ERR_PTR(-EINVAL);
2379}
2380
/* Step one program forwards or backwards through obj->programs.
 * NULL @p starts from the appropriate end; NULL is returned when the
 * iteration runs off either end or @p belongs to a different object.
 */
static struct bpf_program *
__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	/* NOTE: idx (ssize_t) is promoted to unsigned for the first
	 * comparison, so idx == -1 already satisfies it; the explicit
	 * idx < 0 check is kept as a readable safety net.
	 */
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}
2405
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002406struct bpf_program *
2407bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2408{
2409 struct bpf_program *prog = prev;
2410
2411 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002412 prog = __bpf_program__iter(prog, obj, true);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002413 } while (prog && bpf_program__is_function_storage(prog, obj));
2414
2415 return prog;
2416}
2417
2418struct bpf_program *
2419bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2420{
2421 struct bpf_program *prog = next;
2422
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002423 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002424 prog = __bpf_program__iter(prog, obj, false);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002425 } while (prog && bpf_program__is_function_storage(prog, obj));
2426
2427 return prog;
2428}
2429
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002430int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2431 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002432{
2433 if (prog->priv && prog->clear_priv)
2434 prog->clear_priv(prog, prog->priv);
2435
2436 prog->priv = priv;
2437 prog->clear_priv = clear_priv;
2438 return 0;
2439}
2440
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002441void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002442{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002443 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002444}
2445
Jakub Kicinski9aba3612018-06-28 14:41:37 -07002446void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
2447{
2448 prog->prog_ifindex = ifindex;
2449}
2450
/* Return the program's title (its ELF section name).  With @needs_copy
 * the string is strdup()ed — the caller then owns it and must free it;
 * otherwise the returned pointer's lifetime is tied to @prog.  Returns
 * ERR_PTR(-ENOMEM) if the copy fails.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}
2466
/* fd of the first (and, without a preprocessor, only) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2471
2472int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2473 bpf_program_prep_t prep)
2474{
2475 int *instances_fds;
2476
2477 if (nr_instances <= 0 || !prep)
2478 return -EINVAL;
2479
2480 if (prog->instances.nr > 0 || prog->instances.fds) {
2481 pr_warning("Can't set pre-processor after loading\n");
2482 return -EINVAL;
2483 }
2484
2485 instances_fds = malloc(sizeof(int) * nr_instances);
2486 if (!instances_fds) {
2487 pr_warning("alloc memory failed for fds\n");
2488 return -ENOMEM;
2489 }
2490
2491 /* fill all fd with -1 */
2492 memset(instances_fds, -1, sizeof(int) * nr_instances);
2493
2494 prog->instances.nr = nr_instances;
2495 prog->instances.fds = instances_fds;
2496 prog->preprocessor = prep;
2497 return 0;
2498}
2499
2500int bpf_program__nth_fd(struct bpf_program *prog, int n)
2501{
2502 int fd;
2503
Jakub Kicinski1e960042018-07-26 14:32:18 -07002504 if (!prog)
2505 return -EINVAL;
2506
Wang Nanb5805632015-11-16 12:10:09 +00002507 if (n >= prog->instances.nr || n < 0) {
2508 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2509 n, prog->section_name, prog->instances.nr);
2510 return -EINVAL;
2511 }
2512
2513 fd = prog->instances.fds[n];
2514 if (fd < 0) {
2515 pr_warning("%dth instance of program '%s' is invalid\n",
2516 n, prog->section_name);
2517 return -ENOENT;
2518 }
2519
2520 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002521}
Wang Nan9d759a92015-11-27 08:47:35 +00002522
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07002523void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00002524{
2525 prog->type = type;
2526}
2527
Wang Nan5f44e4c82016-07-13 10:44:01 +00002528static bool bpf_program__is_type(struct bpf_program *prog,
2529 enum bpf_prog_type type)
2530{
2531 return prog ? (prog->type == type) : false;
2532}
2533
/* Generate the public bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * accessor pair for each program type instantiated below.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002556
John Fastabend16962b22018-04-23 14:30:38 -07002557void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2558 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002559{
2560 prog->expected_attach_type = type;
2561}
2562
/* Helpers for building the ELF section-name -> program/attach type
 * table below.  Each entry records the section prefix, its length, the
 * program type, the expected attach type, whether the program can be
 * attached, and the attach type inferred from the section name.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07002581
/*
 * Map from ELF section-name prefixes to BPF program type, expected attach
 * type and attach type.  Lookups in libbpf_prog_type_by_name() and
 * libbpf_attach_type_by_name() are prefix matches over ->len, so e.g.
 * "kprobe/sys_write" matches the "kprobe/" entry.  First match wins, so
 * more specific prefixes (e.g. "cgroup_skb/ingress") must precede their
 * generic fallbacks (e.g. "cgroup/skb").
 */
static const struct {
	const char *sec;			/* section-name prefix */
	size_t len;				/* length of ->sec prefix */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;			/* usable with libbpf_attach_type_by_name() */
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

/* The helper macros are build-time only; keep the macro namespace clean. */
#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002648
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002649int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2650 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00002651{
2652 int i;
2653
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002654 if (!name)
2655 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00002656
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002657 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2658 if (strncmp(name, section_names[i].sec, section_names[i].len))
2659 continue;
2660 *prog_type = section_names[i].prog_type;
2661 *expected_attach_type = section_names[i].expected_attach_type;
2662 return 0;
2663 }
2664 return -EINVAL;
2665}
Roman Gushchin583c9002017-12-13 15:18:51 +00002666
Andrey Ignatov956b6202018-09-26 15:24:53 -07002667int libbpf_attach_type_by_name(const char *name,
2668 enum bpf_attach_type *attach_type)
2669{
2670 int i;
2671
2672 if (!name)
2673 return -EINVAL;
2674
2675 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2676 if (strncmp(name, section_names[i].sec, section_names[i].len))
2677 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07002678 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07002679 return -EINVAL;
2680 *attach_type = section_names[i].attach_type;
2681 return 0;
2682 }
2683 return -EINVAL;
2684}
2685
/*
 * Guess a program's type and expected attach type from the ELF section
 * it was found in.  Thin wrapper over libbpf_prog_type_by_name() using
 * the program's recorded section name; returns 0 or -EINVAL.
 */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2694
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002695int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002696{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002697 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002698}
2699
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002700const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002701{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002702 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002703}
2704
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002705const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002706{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002707 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002708}
2709
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002710__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002711{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002712 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002713}
2714
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002715__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002716{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002717 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002718}
2719
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002720int bpf_map__set_priv(struct bpf_map *map, void *priv,
2721 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002722{
2723 if (!map)
2724 return -EINVAL;
2725
2726 if (map->priv) {
2727 if (map->clear_priv)
2728 map->clear_priv(map, map->priv);
2729 }
2730
2731 map->priv = priv;
2732 map->clear_priv = clear_priv;
2733 return 0;
2734}
2735
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002736void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002737{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002738 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002739}
2740
/*
 * A map is "offload neutral" when it must stay on the host even if the
 * programs using it are offloaded to a device; currently only perf
 * event arrays qualify.  map must be non-NULL.
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2745
/*
 * Set the network interface index the map should be offloaded to.
 * NOTE(review): map is not NULL-checked — callers must pass a valid map.
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2750
/*
 * Set the fd of the inner map used as the template for a map-in-map
 * (array-of-maps / hash-of-maps) outer map.  Fails with -EINVAL when the
 * map is not a map-in-map type or an inner fd was already set (-1 marks
 * "unset").  Must be called before the map is created in the kernel.
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}
2764
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002765static struct bpf_map *
2766__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00002767{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002768 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00002769 struct bpf_map *s, *e;
2770
2771 if (!obj || !obj->maps)
2772 return NULL;
2773
2774 s = obj->maps;
2775 e = obj->maps + obj->nr_maps;
2776
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002777 if ((m < s) || (m >= e)) {
Wang Nan9d759a92015-11-27 08:47:35 +00002778 pr_warning("error in %s: map handler doesn't belong to object\n",
2779 __func__);
2780 return NULL;
2781 }
2782
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002783 idx = (m - obj->maps) + i;
2784 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00002785 return NULL;
2786 return &obj->maps[idx];
2787}
Wang Nan561bbcc2015-11-27 08:47:36 +00002788
2789struct bpf_map *
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002790bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2791{
2792 if (prev == NULL)
2793 return obj->maps;
2794
2795 return __bpf_map__iter(prev, obj, 1);
2796}
2797
2798struct bpf_map *
2799bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2800{
2801 if (next == NULL) {
2802 if (!obj->nr_maps)
2803 return NULL;
2804 return obj->maps + obj->nr_maps - 1;
2805 }
2806
2807 return __bpf_map__iter(next, obj, -1);
2808}
2809
/*
 * Find a map by name within an object.  Exact string match; maps that
 * ended up nameless are skipped.  Returns NULL when no map matches.
 */
struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}
Wang Nan5a6acad2016-11-26 07:03:27 +00002821
2822struct bpf_map *
2823bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2824{
2825 int i;
2826
2827 for (i = 0; i < obj->nr_maps; i++) {
2828 if (obj->maps[i].offset == offset)
2829 return &obj->maps[i];
2830 }
2831 return ERR_PTR(-ENOENT);
2832}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002833
/*
 * Extract the negative errno encoded in an ERR_PTR-style pointer, or 0
 * when the pointer is a valid (non-error) value.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002840
2841int bpf_prog_load(const char *file, enum bpf_prog_type type,
2842 struct bpf_object **pobj, int *prog_fd)
2843{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002844 struct bpf_prog_load_attr attr;
2845
2846 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2847 attr.file = file;
2848 attr.prog_type = type;
2849 attr.expected_attach_type = 0;
2850
2851 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2852}
2853
2854int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2855 struct bpf_object **pobj, int *prog_fd)
2856{
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002857 struct bpf_object_open_attr open_attr = {
2858 .file = attr->file,
2859 .prog_type = attr->prog_type,
2860 };
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002861 struct bpf_program *prog, *first_prog = NULL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002862 enum bpf_attach_type expected_attach_type;
2863 enum bpf_prog_type prog_type;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002864 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07002865 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002866 int err;
2867
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002868 if (!attr)
2869 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07002870 if (!attr->file)
2871 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002872
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002873 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07002874 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07002875 return -ENOENT;
2876
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002877 bpf_object__for_each_program(prog, obj) {
2878 /*
2879 * If type is not specified, try to guess it based on
2880 * section name.
2881 */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002882 prog_type = attr->prog_type;
David Beckettf0307a72018-05-16 14:02:49 -07002883 prog->prog_ifindex = attr->ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002884 expected_attach_type = attr->expected_attach_type;
2885 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002886 err = bpf_program__identify_section(prog, &prog_type,
2887 &expected_attach_type);
2888 if (err < 0) {
2889 pr_warning("failed to guess program type based on section name %s\n",
2890 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002891 bpf_object__close(obj);
2892 return -EINVAL;
2893 }
2894 }
2895
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002896 bpf_program__set_type(prog, prog_type);
2897 bpf_program__set_expected_attach_type(prog,
2898 expected_attach_type);
2899
Taeung Song69495d22018-09-03 08:30:07 +09002900 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002901 first_prog = prog;
2902 }
2903
David Beckettf0307a72018-05-16 14:02:49 -07002904 bpf_map__for_each(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07002905 if (!bpf_map__is_offload_neutral(map))
2906 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07002907 }
2908
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002909 if (!first_prog) {
2910 pr_warning("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07002911 bpf_object__close(obj);
2912 return -ENOENT;
2913 }
2914
John Fastabend6f6d33f2017-08-15 22:34:22 -07002915 err = bpf_object__load(obj);
2916 if (err) {
2917 bpf_object__close(obj);
2918 return -EINVAL;
2919 }
2920
2921 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002922 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07002923 return 0;
2924}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002925
/*
 * Drain one perf ring buffer, invoking fn on every pending event record.
 *
 * mmap_mem is the mmap'ed region (metadata page followed by the data
 * pages); mmap_size is the size of the data area (must be a power of
 * two, since masking with mmap_size - 1 is used for wrap-around);
 * page_size is the size of the leading metadata page.  *copy_mem /
 * *copy_size is a caller-owned scratch buffer, grown on demand, used to
 * linearize records that wrap past the end of the ring; the caller
 * frees it.
 *
 * Returns the last value fn returned (LIBBPF_PERF_EVENT_CONT when the
 * ring was fully drained) or LIBBPF_PERF_EVENT_ERROR on allocation
 * failure.  Consumed space is released to the kernel by publishing the
 * new tail; the head/tail accessors handle the required memory ordering
 * (see linux/ring_buffer.h).
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		/* Tail offsets grow monotonically; mask for the ring position. */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: linearize it
		 * into the scratch buffer before handing it to fn.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		/* The record is consumed even when fn asks to stop. */
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new tail so the kernel can reuse the space. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}