blob: 6dba0f01673b1e16a2592a2385ed5f57e52969af [file] [log] [blame]
Alexei Starovoitov1bc38b82018-10-05 16:40:00 -07001// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
Eric Leblond6061a3d2018-01-30 21:55:03 +01002
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan1b76c132015-07-01 02:13:51 +000010 */
11
Yonghong Songb4269952018-11-29 15:31:45 -080012#ifndef _GNU_SOURCE
Jakub Kicinski531b0142018-07-10 14:43:05 -070013#define _GNU_SOURCE
Yonghong Songb4269952018-11-29 15:31:45 -080014#endif
Wang Nan1b76c132015-07-01 02:13:51 +000015#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000016#include <stdio.h>
17#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080018#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000019#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000020#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000021#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000022#include <fcntl.h>
23#include <errno.h>
Wang Nan1b76c132015-07-01 02:13:51 +000024#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080025#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000026#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000027#include <linux/bpf.h>
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -070028#include <linux/btf.h>
Stanislav Fomichev47eff612018-11-20 17:11:19 -080029#include <linux/filter.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000030#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080031#include <linux/limits.h>
Yonghong Song438363c2018-10-09 16:14:47 -070032#include <linux/perf_event.h>
Daniel Borkmanna64af0e2018-10-19 15:51:03 +020033#include <linux/ring_buffer.h>
Joe Stringerf3675402017-01-26 13:19:56 -080034#include <sys/stat.h>
35#include <sys/types.h>
36#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070037#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000038#include <libelf.h>
39#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000040
41#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000042#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070043#include "btf.h"
Arnaldo Carvalho de Melo6d419072018-09-14 16:47:14 -030044#include "str_error.h"
Yonghong Song8461ef82019-02-01 16:14:14 -080045#include "libbpf_util.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000046
Wang Nan9b161372016-07-18 06:01:08 +000047#ifndef EM_BPF
48#define EM_BPF 247
49#endif
50
Joe Stringerf3675402017-01-26 13:19:56 -080051#ifndef BPF_FS_MAGIC
52#define BPF_FS_MAGIC 0xcafe4a11
53#endif
54
Andrey Ignatovff466b52019-04-06 22:37:34 -070055/* vsprintf() in __base_pr() uses nonliteral format string. It may break
56 * compilation if user enables corresponding warning. Disable it explicitly.
57 */
58#pragma GCC diagnostic ignored "-Wformat-nonliteral"
59
Wang Nanb3f59d62015-07-01 02:13:52 +000060#define __printf(a, b) __attribute__((format(printf, a, b)))
61
Stanislav Fomicheva8a1f7d2019-02-04 16:20:55 -080062static int __base_pr(enum libbpf_print_level level, const char *format,
63 va_list args)
Wang Nanb3f59d62015-07-01 02:13:52 +000064{
Yonghong Song6f1ae8b2019-02-01 16:14:17 -080065 if (level == LIBBPF_DEBUG)
66 return 0;
67
Stanislav Fomicheva8a1f7d2019-02-04 16:20:55 -080068 return vfprintf(stderr, format, args);
Wang Nanb3f59d62015-07-01 02:13:52 +000069}
70
Stanislav Fomicheva8a1f7d2019-02-04 16:20:55 -080071static libbpf_print_fn_t __libbpf_pr = __base_pr;
Wang Nanb3f59d62015-07-01 02:13:52 +000072
/* Install a user-supplied print callback for all libbpf messages.
 * Passing NULL silences libbpf entirely: libbpf_print() bails out when
 * __libbpf_pr is NULL.
 */
void libbpf_set_print(libbpf_print_fn_t fn)
{
	__libbpf_pr = fn;
}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000077
Yonghong Song8461ef82019-02-01 16:14:14 -080078__printf(2, 3)
79void libbpf_print(enum libbpf_print_level level, const char *format, ...)
80{
81 va_list args;
82
Yonghong Song6f1ae8b2019-02-01 16:14:17 -080083 if (!__libbpf_pr)
84 return;
85
Yonghong Song8461ef82019-02-01 16:14:14 -080086 va_start(args, format);
Yonghong Song6f1ae8b2019-02-01 16:14:17 -080087 __libbpf_pr(level, format, args);
Yonghong Song8461ef82019-02-01 16:14:14 -080088 va_end(args);
89}
90
/* Buffer size used with libbpf_strerror_r() for error strings. */
#define STRERR_BUFSIZE 128

/* Run @action; if it returns non-zero, store the result in @err and
 * jump to label @out.
 */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* free(*ptr) and reset the pointer to NULL to prevent double-free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* close(fd) only if fd is valid (>= 0), reset fd to -1, and evaluate
 * to the close() result (0 when nothing was closed).
 */
# define zclose(fd) ({ \
	int ___err = 0;	\
	if ((fd) >= 0)	\
		___err = close((fd));	\
	fd = -1;	\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
119
/* Convert a pointer to the __u64 representation used by the bpf()
 * syscall ABI. The intermediate unsigned long cast avoids sign
 * extension (unsigned long matches pointer width on Linux).
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
124
/* Kernel feature flags shared by all programs of an object (each
 * bpf_program points back here via its ->caps member).
 */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};
129
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name found for this section, or ".text" (see
	 * bpf_object__init_prog_names()).
	 */
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	/* Private copy of the section's instructions (owned, freed in
	 * bpf_program__exit()).
	 */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* Relocations to resolve before load: a map reference or a
	 * call into the .text section.
	 */
	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
		} type;
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: offset in .text */
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	/* Loaded program fds, one per instance.
	 * nr == -1 means "never loaded" (see bpf_program__unload()).
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back-pointer to the owning object. */
	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;	/* -1 when no BTF fd is held */
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	/* Points at the owning object's caps (set in
	 * bpf_object__add_program()).
	 */
	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};
184
struct bpf_map {
	int fd;			/* -1 until the map is created */
	char *name;		/* strdup'd symbol name from "maps" section */
	size_t offset;		/* symbol value: offset inside "maps" section */
	int map_ifindex;
	int inner_map_fd;	/* -1 when unset */
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
197
/* All opened bpf_objects are linked here via bpf_object->list. */
static LIST_HEAD(bpf_objects_list);
199
struct bpf_object {
	/* License string; stays NUL-terminated because the object is
	 * calloc'd and bpf_object__init_license() copies at most
	 * sizeof(license) - 1 bytes.
	 */
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		/* Caller-owned in-memory image, if opened from a
		 * buffer; never freed here (see bpf_object__new()).
		 */
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;	/* "maps" section index, -1 if absent */
		int text_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	/* Object file path, stored inline (flexible array member). */
	char path[];
};
/* True while the ELF handle is open, i.e. elf-stage state is valid. */
#define obj_elf_valid(o)	((o)->efile.elf)
250
Joe Stringer29cd77f2018-10-02 13:35:39 -0700251void bpf_program__unload(struct bpf_program *prog)
Wang Nan55cffde2015-07-01 02:14:07 +0000252{
Wang Nanb5805632015-11-16 12:10:09 +0000253 int i;
254
Wang Nan55cffde2015-07-01 02:14:07 +0000255 if (!prog)
256 return;
257
Wang Nanb5805632015-11-16 12:10:09 +0000258 /*
259 * If the object is opened but the program was never loaded,
260 * it is possible that prog->instances.nr == -1.
261 */
262 if (prog->instances.nr > 0) {
263 for (i = 0; i < prog->instances.nr; i++)
264 zclose(prog->instances.fds[i]);
265 } else if (prog->instances.nr != -1) {
266 pr_warning("Internal error: instances.nr is %d\n",
267 prog->instances.nr);
268 }
269
270 prog->instances.nr = -1;
271 zfree(&prog->instances.fds);
Yonghong Song2993e052018-11-19 15:29:16 -0800272
273 zclose(prog->btf_fd);
274 zfree(&prog->func_info);
Prashant Bhole07a09d12018-12-17 16:57:50 +0900275 zfree(&prog->line_info);
Wang Nan55cffde2015-07-01 02:14:07 +0000276}
277
Wang Nana5b8bd42015-07-01 02:14:00 +0000278static void bpf_program__exit(struct bpf_program *prog)
279{
280 if (!prog)
281 return;
282
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000283 if (prog->clear_priv)
284 prog->clear_priv(prog, prog->priv);
285
286 prog->priv = NULL;
287 prog->clear_priv = NULL;
288
Wang Nan55cffde2015-07-01 02:14:07 +0000289 bpf_program__unload(prog);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700290 zfree(&prog->name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000291 zfree(&prog->section_name);
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800292 zfree(&prog->pin_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000293 zfree(&prog->insns);
Wang Nan34090912015-07-01 02:14:02 +0000294 zfree(&prog->reloc_desc);
295
296 prog->nr_reloc = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +0000297 prog->insns_cnt = 0;
298 prog->idx = -1;
299}
300
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800301static char *__bpf_program__pin_name(struct bpf_program *prog)
302{
303 char *name, *p;
304
305 name = p = strdup(prog->section_name);
306 while ((p = strchr(p, '/')))
307 *p = '_';
308
309 return name;
310}
311
/*
 * Initialize @prog from a program section's raw contents: duplicate
 * the section name, derive the pin name, and take a private copy of
 * the instructions.
 *
 * @data:         start of the section bytes (array of struct bpf_insn)
 * @size:         section size in bytes; must hold at least one insn
 * @section_name: ELF section name (duplicated, not borrowed)
 * @idx:          ELF section index, kept for relocation matching
 * @prog:         out-param; zeroed first, cleaned up with
 *                bpf_program__exit() on failure
 *
 * Returns 0 on success, -EINVAL for a truncated section, -ENOMEM on
 * allocation failure.
 */
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	/* Any trailing partial insn is silently dropped by the
	 * division below (size was validated above to hold >= 1).
	 */
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	/* -1 marks "never loaded"; see bpf_program__unload(). */
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->btf_fd = -1;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}
357
/*
 * Append a new program, built from section @idx's contents, to
 * obj->programs (grown with reallocarray). On success the stored
 * program takes ownership of all allocations made by
 * bpf_program__init(); on failure they are released and the existing
 * obj->programs array is left untouched.
 *
 * Returns 0 on success or a negative errno-style code.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	/* Fix up the back-pointer before the struct copy below. */
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}
393
/*
 * Give every program a name: the first STB_GLOBAL symbol found in the
 * program's section, or ".text" for the text section when no such
 * symbol exists.
 *
 * Returns 0 on success; -LIBBPF_ERRNO__LIBELF when a symbol's name
 * string can't be read, -EINVAL when no name can be determined,
 * -ENOMEM on allocation failure.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan the symbol table until a global symbol in this
		 * program's section is found (loop stops once !name
		 * becomes false).
		 */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
446
/*
 * Allocate a bpf_object, store @path inline (flexible array member),
 * remember the optional caller-owned in-memory image, and link the
 * object into the global bpf_objects_list.
 *
 * Returns the new object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* calloc zeroes everything, including license[] and path[]. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
478
479static void bpf_object__elf_finish(struct bpf_object *obj)
480{
481 if (!obj_elf_valid(obj))
482 return;
483
484 if (obj->efile.elf) {
485 elf_end(obj->efile.elf);
486 obj->efile.elf = NULL;
487 }
Wang Nanbec7d682015-07-01 02:13:59 +0000488 obj->efile.symbols = NULL;
Wang Nanb62f06e2015-07-01 02:14:01 +0000489
490 zfree(&obj->efile.reloc);
491 obj->efile.nr_reloc = 0;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000492 zclose(obj->efile.fd);
Wang Nan6c956392015-07-01 02:13:54 +0000493 obj->efile.obj_buf = NULL;
494 obj->efile.obj_buf_sz = 0;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000495}
496
497static int bpf_object__elf_init(struct bpf_object *obj)
498{
499 int err = 0;
500 GElf_Ehdr *ep;
501
502 if (obj_elf_valid(obj)) {
503 pr_warning("elf init: internal error\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000504 return -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000505 }
506
Wang Nan6c956392015-07-01 02:13:54 +0000507 if (obj->efile.obj_buf_sz > 0) {
508 /*
509 * obj_buf should have been validated by
510 * bpf_object__open_buffer().
511 */
512 obj->efile.elf = elf_memory(obj->efile.obj_buf,
513 obj->efile.obj_buf_sz);
514 } else {
515 obj->efile.fd = open(obj->path, O_RDONLY);
516 if (obj->efile.fd < 0) {
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200517 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700518 char *cp = libbpf_strerror_r(errno, errmsg,
519 sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200520
521 pr_warning("failed to open %s: %s\n", obj->path, cp);
Wang Nan6c956392015-07-01 02:13:54 +0000522 return -errno;
523 }
524
525 obj->efile.elf = elf_begin(obj->efile.fd,
526 LIBBPF_ELF_C_READ_MMAP,
527 NULL);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000528 }
529
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000530 if (!obj->efile.elf) {
531 pr_warning("failed to open %s as ELF file\n",
532 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000533 err = -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000534 goto errout;
535 }
536
537 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
538 pr_warning("failed to get EHDR from %s\n",
539 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000540 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000541 goto errout;
542 }
543 ep = &obj->efile.ehdr;
544
Wang Nan9b161372016-07-18 06:01:08 +0000545 /* Old LLVM set e_machine to EM_NONE */
546 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000547 pr_warning("%s is not an eBPF object file\n",
548 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000549 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000550 goto errout;
551 }
552
553 return 0;
554errout:
555 bpf_object__elf_finish(obj);
556 return err;
557}
558
Wang Nancc4228d2015-07-01 02:13:55 +0000559static int
560bpf_object__check_endianness(struct bpf_object *obj)
561{
562 static unsigned int const endian = 1;
563
564 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
565 case ELFDATA2LSB:
566 /* We are big endian, BPF obj is little endian. */
567 if (*(unsigned char const *)&endian != 1)
568 goto mismatch;
569 break;
570
571 case ELFDATA2MSB:
572 /* We are little endian, BPF obj is big endian. */
573 if (*(unsigned char const *)&endian != 0)
574 goto mismatch;
575 break;
576 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000577 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000578 }
579
580 return 0;
581
582mismatch:
583 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000584 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000585}
586
Wang Nancb1e5e92015-07-01 02:13:57 +0000587static int
588bpf_object__init_license(struct bpf_object *obj,
589 void *data, size_t size)
590{
591 memcpy(obj->license, data,
592 min(size, sizeof(obj->license) - 1));
593 pr_debug("license of %s is %s\n", obj->path, obj->license);
594 return 0;
595}
596
597static int
598bpf_object__init_kversion(struct bpf_object *obj,
599 void *data, size_t size)
600{
Yonghong Song438363c2018-10-09 16:14:47 -0700601 __u32 kver;
Wang Nancb1e5e92015-07-01 02:13:57 +0000602
603 if (size != sizeof(kver)) {
604 pr_warning("invalid kver section in %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000605 return -LIBBPF_ERRNO__FORMAT;
Wang Nancb1e5e92015-07-01 02:13:57 +0000606 }
607 memcpy(&kver, data, sizeof(kver));
608 obj->kern_version = kver;
609 pr_debug("kernel version of %s is %x\n", obj->path,
610 obj->kern_version);
611 return 0;
612}
613
Eric Leblond4708bbd2016-11-15 04:05:47 +0000614static int compare_bpf_map(const void *_a, const void *_b)
615{
616 const struct bpf_map *a = _a;
617 const struct bpf_map *b = _b;
618
619 return a->offset - b->offset;
620}
621
/* True for the two map-in-map types, whose values are map fds. */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
629
/*
 * Parse the "maps" ELF section and populate obj->maps / obj->nr_maps.
 * Each symbol in the section is one map; definitions are assumed to
 * be equally sized. Definitions larger than our struct bpf_map_def
 * are accepted only if the extra bytes are zero (or when
 * MAPS_RELAX_COMPAT is set in @flags, which merely warns).
 *
 * Returns 0 on success or a negative errno-style code. Partially
 * filled obj->maps is left for the object destructor to release.
 */
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data = NULL;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		/*
		 * fill all fd with -1 so won't close incorrect
		 * fd (fd=0 is stdin) when failure (zclose won't close
		 * negative fd)).
		 */
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		/* Symbol value is the map's offset into the section. */
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					/* non-strict mode: warn only */
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	/* Keep maps ordered by section offset for later relocation. */
	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}
770
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100771static bool section_have_execinstr(struct bpf_object *obj, int idx)
772{
773 Elf_Scn *scn;
774 GElf_Shdr sh;
775
776 scn = elf_getscn(obj->efile.elf, idx);
777 if (!scn)
778 return false;
779
780 if (gelf_getshdr(scn, &sh) != &sh)
781 return false;
782
783 if (sh.sh_flags & SHF_EXECINSTR)
784 return true;
785
786 return false;
787}
788
John Fastabendc034a172018-10-15 11:19:55 -0700789static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
Wang Nan29603662015-07-01 02:13:56 +0000790{
791 Elf *elf = obj->efile.elf;
792 GElf_Ehdr *ep = &obj->efile.ehdr;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800793 Elf_Data *btf_ext_data = NULL;
Wang Nan29603662015-07-01 02:13:56 +0000794 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000795 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000796
797 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
798 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
799 pr_warning("failed to get e_shstrndx from %s\n",
800 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000801 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000802 }
803
804 while ((scn = elf_nextscn(elf, scn)) != NULL) {
805 char *name;
806 GElf_Shdr sh;
807 Elf_Data *data;
808
809 idx++;
810 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100811 pr_warning("failed to get section(%d) header from %s\n",
812 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000813 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000814 goto out;
815 }
816
817 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
818 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100819 pr_warning("failed to get section(%d) name from %s\n",
820 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000821 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000822 goto out;
823 }
824
825 data = elf_getdata(scn, 0);
826 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100827 pr_warning("failed to get section(%d) data from %s(%s)\n",
828 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000829 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000830 goto out;
831 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100832 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
833 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000834 (int)sh.sh_link, (unsigned long)sh.sh_flags,
835 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000836
837 if (strcmp(name, "license") == 0)
838 err = bpf_object__init_license(obj,
839 data->d_buf,
840 data->d_size);
841 else if (strcmp(name, "version") == 0)
842 err = bpf_object__init_kversion(obj,
843 data->d_buf,
844 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000845 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000846 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700847 else if (strcmp(name, BTF_ELF_SEC) == 0) {
Yonghong Song8461ef82019-02-01 16:14:14 -0800848 obj->btf = btf__new(data->d_buf, data->d_size);
Andrii Nakryikof38a1f02019-03-08 15:58:20 -0800849 if (IS_ERR(obj->btf)) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700850 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
851 BTF_ELF_SEC, PTR_ERR(obj->btf));
852 obj->btf = NULL;
Andrii Nakryikof38a1f02019-03-08 15:58:20 -0800853 continue;
854 }
855 err = btf__load(obj->btf);
856 if (err) {
857 pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
858 BTF_ELF_SEC, err);
859 btf__free(obj->btf);
860 obj->btf = NULL;
861 err = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700862 }
Yonghong Song2993e052018-11-19 15:29:16 -0800863 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800864 btf_ext_data = data;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700865 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000866 if (obj->efile.symbols) {
867 pr_warning("bpf: multiple SYMTAB in %s\n",
868 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000869 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000870 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000871 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000872 obj->efile.strtabidx = sh.sh_link;
873 }
Joe Stringerf8c7a4d2019-04-09 23:20:12 +0200874 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
875 if (sh.sh_flags & SHF_EXECINSTR) {
876 if (strcmp(name, ".text") == 0)
877 obj->efile.text_shndx = idx;
878 err = bpf_object__add_program(obj, data->d_buf,
879 data->d_size, name, idx);
880 if (err) {
881 char errmsg[STRERR_BUFSIZE];
882 char *cp = libbpf_strerror_r(-err, errmsg,
883 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +0000884
Joe Stringerf8c7a4d2019-04-09 23:20:12 +0200885 pr_warning("failed to alloc program %s (%s): %s",
886 name, obj->path, cp);
887 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000888 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000889 } else if (sh.sh_type == SHT_REL) {
890 void *reloc = obj->efile.reloc;
891 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100892 int sec = sh.sh_info; /* points to other section */
893
894 /* Only do relo for section with exec instructions */
895 if (!section_have_execinstr(obj, sec)) {
896 pr_debug("skip relo %s(%d) for section(%d)\n",
897 name, idx, sec);
898 continue;
899 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000900
Jakub Kicinski531b0142018-07-10 14:43:05 -0700901 reloc = reallocarray(reloc, nr_reloc,
902 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +0000903 if (!reloc) {
904 pr_warning("realloc failed\n");
905 err = -ENOMEM;
906 } else {
907 int n = nr_reloc - 1;
908
909 obj->efile.reloc = reloc;
910 obj->efile.nr_reloc = nr_reloc;
911
912 obj->efile.reloc[n].shdr = sh;
913 obj->efile.reloc[n].data = data;
914 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100915 } else {
916 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000917 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000918 if (err)
919 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000920 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000921
Wang Nan77ba9a52015-12-08 02:25:30 +0000922 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
923 pr_warning("Corrupted ELF file: index of strtab invalid\n");
924 return LIBBPF_ERRNO__FORMAT;
925 }
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800926 if (btf_ext_data) {
927 if (!obj->btf) {
928 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
929 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
930 } else {
931 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
Yonghong Song8461ef82019-02-01 16:14:14 -0800932 btf_ext_data->d_size);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -0800933 if (IS_ERR(obj->btf_ext)) {
934 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
935 BTF_EXT_ELF_SEC,
936 PTR_ERR(obj->btf_ext));
937 obj->btf_ext = NULL;
938 }
939 }
940 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700941 if (obj->efile.maps_shndx >= 0) {
John Fastabendc034a172018-10-15 11:19:55 -0700942 err = bpf_object__init_maps(obj, flags);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700943 if (err)
944 goto out;
945 }
946 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000947out:
948 return err;
949}
950
Wang Nan34090912015-07-01 02:14:02 +0000951static struct bpf_program *
952bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
953{
954 struct bpf_program *prog;
955 size_t i;
956
957 for (i = 0; i < obj->nr_programs; i++) {
958 prog = &obj->programs[i];
959 if (prog->idx == idx)
960 return prog;
961 }
962 return NULL;
963}
964
Jakub Kicinski6d4b1982018-07-26 14:32:19 -0700965struct bpf_program *
966bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
967{
968 struct bpf_program *pos;
969
970 bpf_object__for_each_program(pos, obj) {
971 if (pos->section_name && !strcmp(pos->section_name, title))
972 return pos;
973 }
974 return NULL;
975}
976
/*
 * Parse one SHT_REL section (@shdr/@data) belonging to @prog and fill
 * prog->reloc_desc with one descriptor per relocation entry.
 *
 * Two kinds of relocations are recognised:
 *  - a BPF_JMP|BPF_CALL insn with src_reg == BPF_PSEUDO_CALL: a call
 *    into the .text section (RELO_CALL, text_off = symbol value);
 *  - a BPF_LD|BPF_IMM|BPF_DW insn whose symbol lives in the maps
 *    section: a map reference (RELO_LD64, resolved to a map index).
 *
 * Returns 0 on success or a negative LIBBPF_ERRNO__{FORMAT,RELOC}/
 * -ENOMEM error code.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Look up the symbol this relocation refers to. */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only map references and .text calls are supported. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* r_offset is a byte offset into the insn array. */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			/* Sub-program call into .text; fixed up later by
			 * bpf_program__reloc_text().
			 */
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		if (sym.st_shndx == maps_shndx) {
			/* Match the symbol's offset within the maps section
			 * against each map's recorded offset.
			 */
			/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
				if (maps[map_idx].offset == sym.st_value) {
					pr_debug("relocation: find map %zd (%s) for insn %u\n",
						 map_idx, maps[map_idx].name, insn_idx);
					break;
				}
			}

			if (map_idx >= nr_maps) {
				pr_warning("bpf relocation: map_idx %d large than %d\n",
					   (int)map_idx, (int)nr_maps - 1);
				return -LIBBPF_ERRNO__RELOC;
			}

			prog->reloc_desc[i].type = RELO_LD64;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].map_idx = map_idx;
		}
	}
	return 0;
}
1072
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001073static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1074{
1075 struct bpf_map_def *def = &map->def;
Yonghong Song96408c42019-02-04 11:00:58 -08001076 __u32 key_type_id, value_type_id;
1077 int ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001078
Yonghong Song96408c42019-02-04 11:00:58 -08001079 ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
1080 def->value_size, &key_type_id,
1081 &value_type_id);
1082 if (ret)
1083 return ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001084
Yonghong Song96408c42019-02-04 11:00:58 -08001085 map->btf_key_type_id = key_type_id;
1086 map->btf_value_type_id = value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001087
1088 return 0;
1089}
1090
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001091int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1092{
1093 struct bpf_map_info info = {};
1094 __u32 len = sizeof(info);
1095 int new_fd, err;
1096 char *new_name;
1097
1098 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1099 if (err)
1100 return err;
1101
1102 new_name = strdup(info.name);
1103 if (!new_name)
1104 return -errno;
1105
1106 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1107 if (new_fd < 0)
1108 goto err_free_new_name;
1109
1110 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1111 if (new_fd < 0)
1112 goto err_close_new_fd;
1113
1114 err = zclose(map->fd);
1115 if (err)
1116 goto err_close_new_fd;
1117 free(map->name);
1118
1119 map->fd = new_fd;
1120 map->name = new_name;
1121 map->def.type = info.type;
1122 map->def.key_size = info.key_size;
1123 map->def.value_size = info.value_size;
1124 map->def.max_entries = info.max_entries;
1125 map->def.map_flags = info.map_flags;
1126 map->btf_key_type_id = info.btf_key_type_id;
1127 map->btf_value_type_id = info.btf_value_type_id;
1128
1129 return 0;
1130
1131err_close_new_fd:
1132 close(new_fd);
1133err_free_new_name:
1134 free(new_name);
1135 return -errno;
1136}
1137
Andrey Ignatov1a11a4c2019-02-14 15:01:42 -08001138int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1139{
1140 if (!map || !max_entries)
1141 return -EINVAL;
1142
1143 /* If map already created, its attributes can't be changed. */
1144 if (map->fd >= 0)
1145 return -EBUSY;
1146
1147 map->def.max_entries = max_entries;
1148
1149 return 0;
1150}
1151
Wang Nan52d33522015-07-01 02:14:04 +00001152static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08001153bpf_object__probe_name(struct bpf_object *obj)
1154{
1155 struct bpf_load_program_attr attr;
1156 char *cp, errmsg[STRERR_BUFSIZE];
1157 struct bpf_insn insns[] = {
1158 BPF_MOV64_IMM(BPF_REG_0, 0),
1159 BPF_EXIT_INSN(),
1160 };
1161 int ret;
1162
1163 /* make sure basic loading works */
1164
1165 memset(&attr, 0, sizeof(attr));
1166 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1167 attr.insns = insns;
1168 attr.insns_cnt = ARRAY_SIZE(insns);
1169 attr.license = "GPL";
1170
1171 ret = bpf_load_program_xattr(&attr, NULL, 0);
1172 if (ret < 0) {
1173 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1174 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1175 __func__, cp, errno);
1176 return -errno;
1177 }
1178 close(ret);
1179
1180 /* now try the same program, but with the name */
1181
1182 attr.name = "test";
1183 ret = bpf_load_program_xattr(&attr, NULL, 0);
1184 if (ret >= 0) {
1185 obj->caps.name = 1;
1186 close(ret);
1187 }
1188
1189 return 0;
1190}
1191
/* Probe kernel capabilities libbpf cares about; currently only whether
 * BPF_PROG_LOAD accepts a program name (recorded in obj->caps).
 */
static int bpf_object__probe_caps(struct bpf_object *obj)
{
	return bpf_object__probe_name(obj);
}
1197
1198static int
Wang Nan52d33522015-07-01 02:14:04 +00001199bpf_object__create_maps(struct bpf_object *obj)
1200{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001201 struct bpf_create_map_attr create_attr = {};
Wang Nan52d33522015-07-01 02:14:04 +00001202 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001203 int err;
Wang Nan52d33522015-07-01 02:14:04 +00001204
Wang Nan9d759a92015-11-27 08:47:35 +00001205 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001206 struct bpf_map *map = &obj->maps[i];
1207 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001208 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001209 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00001210
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001211 if (map->fd >= 0) {
1212 pr_debug("skip map create (preset) %s: fd=%d\n",
1213 map->name, map->fd);
1214 continue;
1215 }
1216
Stanislav Fomichev94cb3102018-11-20 17:11:20 -08001217 if (obj->caps.name)
1218 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07001219 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001220 create_attr.map_type = def->type;
1221 create_attr.map_flags = def->map_flags;
1222 create_attr.key_size = def->key_size;
1223 create_attr.value_size = def->value_size;
1224 create_attr.max_entries = def->max_entries;
1225 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001226 create_attr.btf_key_type_id = 0;
1227 create_attr.btf_value_type_id = 0;
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08001228 if (bpf_map_type__is_map_in_map(def->type) &&
1229 map->inner_map_fd >= 0)
1230 create_attr.inner_map_fd = map->inner_map_fd;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001231
1232 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1233 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001234 create_attr.btf_key_type_id = map->btf_key_type_id;
1235 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001236 }
1237
1238 *pfd = bpf_create_map_xattr(&create_attr);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001239 if (*pfd < 0 && create_attr.btf_key_type_id) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001240 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001241 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001242 map->name, cp, errno);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001243 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07001244 create_attr.btf_key_type_id = 0;
1245 create_attr.btf_value_type_id = 0;
1246 map->btf_key_type_id = 0;
1247 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001248 *pfd = bpf_create_map_xattr(&create_attr);
1249 }
1250
Wang Nan52d33522015-07-01 02:14:04 +00001251 if (*pfd < 0) {
1252 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00001253
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001254 err = *pfd;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001255 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Eric Leblond49bf4b32017-08-20 21:48:14 +02001256 pr_warning("failed to create map (name: '%s'): %s\n",
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001257 map->name, cp);
Wang Nan52d33522015-07-01 02:14:04 +00001258 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00001259 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001260 return err;
1261 }
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001262 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00001263 }
1264
Wang Nan52d33522015-07-01 02:14:04 +00001265 return 0;
1266}
1267
Wang Nan8a47a6c2015-07-01 02:14:05 +00001268static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001269check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1270 void *btf_prog_info, const char *info_name)
1271{
1272 if (err != -ENOENT) {
1273 pr_warning("Error in loading %s for sec %s.\n",
1274 info_name, prog->section_name);
1275 return err;
1276 }
1277
1278 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1279
1280 if (btf_prog_info) {
1281 /*
1282 * Some info has already been found but has problem
1283 * in the last btf_ext reloc. Must have to error
1284 * out.
1285 */
1286 pr_warning("Error in relocating %s for sec %s.\n",
1287 info_name, prog->section_name);
1288 return err;
1289 }
1290
1291 /*
1292 * Have problem loading the very first info. Ignore
1293 * the rest.
1294 */
1295 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1296 info_name, prog->section_name, info_name);
1297 return 0;
1298}
1299
/*
 * Pull func_info and line_info records for @section_name out of the
 * object's .BTF.ext data and attach them to @prog, adjusting them to
 * start at @insn_offset (0 for the main program; for an appended .text
 * sub-program, the offset at which it was spliced in).
 *
 * Sub-program info is only collected if the main program's info was
 * loaded first.  Errors are filtered through check_btf_ext_reloc_err():
 * a plain "no such info" on the first attempt is non-fatal.
 *
 * Returns 0 on success or a negative error code.
 */
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name, __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	/* Same gating as above, but for line info. */
	if (!insn_offset || prog->line_info) {
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	/* Main program also records which BTF fd the info refers to. */
	if (!insn_offset)
		prog->btf_fd = btf__fd(obj->btf);

	return 0;
}
1343
/*
 * Resolve one RELO_CALL relocation in @prog: a call into the .text
 * section.  The first time this happens for a program, the entire
 * .text section is appended after the program's own instructions
 * (and its BTF func/line info relocated to the appended offset);
 * main_prog_cnt records where that copy starts.  The call insn's imm
 * is then rewritten to the pc-relative offset of the callee inside
 * the combined instruction array.
 *
 * Returns 0 on success, -LIBBPF_ERRNO__RELOC/-ENOMEM on failure.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;
	int err;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain calls relocated into .text. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		/* First call relo: append the whole .text section once. */
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}

		/* Relocate .text's BTF info to its appended position. */
		if (obj->btf_ext) {
			err = bpf_program_reloc_btf_ext(prog, obj,
							text->section_name,
							prog->insns_cnt);
			if (err)
				return err;
		}

		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* imm becomes the relative jump from the call site into the
	 * appended .text copy (text_off was pre-added at collect time).
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1396
/*
 * Apply all collected relocations of @prog:
 *  - relocate its BTF.ext func/line info (main program, offset 0);
 *  - RELO_LD64: patch the ld_imm64 insn into a BPF_PSEUDO_MAP_FD load
 *    of the corresponding map's fd;
 *  - RELO_CALL: splice in .text and fix the call offset via
 *    bpf_program__reloc_text().
 *
 * On success the reloc_desc array is freed and nr_reloc reset.
 * Returns 0 or a negative error code.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	/* Nothing collected for this program. */
	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			/* Turn ld_imm64 into a map-fd pseudo load. */
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else if (prog->reloc_desc[i].type == RELO_CALL) {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
1442
1443
1444static int
1445bpf_object__relocate(struct bpf_object *obj)
1446{
1447 struct bpf_program *prog;
1448 size_t i;
1449 int err;
1450
1451 for (i = 0; i < obj->nr_programs; i++) {
1452 prog = &obj->programs[i];
1453
Wang Nan9d759a92015-11-27 08:47:35 +00001454 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001455 if (err) {
1456 pr_warning("failed to relocate '%s'\n",
1457 prog->section_name);
1458 return err;
1459 }
1460 }
1461 return 0;
1462}
1463
Wang Nan34090912015-07-01 02:14:02 +00001464static int bpf_object__collect_reloc(struct bpf_object *obj)
1465{
1466 int i, err;
1467
1468 if (!obj_elf_valid(obj)) {
1469 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001470 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001471 }
1472
1473 for (i = 0; i < obj->efile.nr_reloc; i++) {
1474 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1475 Elf_Data *data = obj->efile.reloc[i].data;
1476 int idx = shdr->sh_info;
1477 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00001478
1479 if (shdr->sh_type != SHT_REL) {
1480 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001481 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001482 }
1483
1484 prog = bpf_object__find_prog_by_idx(obj, idx);
1485 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001486 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001487 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001488 }
1489
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001490 err = bpf_program__collect_reloc(prog,
Wang Nan34090912015-07-01 02:14:02 +00001491 shdr, data,
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001492 obj);
Wang Nan34090912015-07-01 02:14:02 +00001493 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00001494 return err;
Wang Nan34090912015-07-01 02:14:02 +00001495 }
1496 return 0;
1497}
1498
/*
 * Load one instance of @prog (the given @insns array) into the kernel
 * via bpf_load_program_xattr().  On success the new prog fd is stored
 * in *@pfd and 0 is returned.
 *
 * The verifier log buffer starts at BPF_LOG_BUF_SIZE and is doubled
 * and the load retried whenever the kernel reports ENOSPC (log
 * truncated).  On failure, diagnostics are narrowed down:
 *  - non-empty verifier log          -> -LIBBPF_ERRNO__VERIFY
 *  - program exceeds BPF_MAXINSNS    -> -LIBBPF_ERRNO__PROG2BIG
 *  - loads fine as a kprobe instead  -> -LIBBPF_ERRNO__PROGTYPE
 *  - otherwise (log allocated)       -> -LIBBPF_ERRNO__KVER
 *  - fallback                        -> -LIBBPF_ERRNO__LOAD
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	int log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* Only pass a name if the kernel supports it (probed earlier). */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	load_attr.log_level = prog->log_level;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

retry_load:
	/* Load proceeds without a log if this allocation fails. */
	log_buf = malloc(log_buf_size);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);

	if (ret >= 0) {
		if (load_attr.log_level)
			pr_debug("verifier log:\n%s", log_buf);
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* ENOSPC: verifier log truncated — grow the buffer and retry. */
	if (errno == ENOSPC) {
		log_buf_size <<= 1;
		free(log_buf);
		goto retry_load;
	}
	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* Verifier produced output: a verification failure. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* Re-attempt as a kprobe purely as a diagnostic. */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1586
/*
 * Load all instances of @prog into the kernel.
 *
 * Two modes:
 *  - No preprocessor: a single-slot instances.fds array is allocated
 *    lazily and prog->insns is loaded once into instances.fds[0].
 *  - Preprocessor set: bpf_program__set_prep() has already sized
 *    instances.fds; each instance's instructions are produced by the
 *    preprocessor callback and loaded separately.  An instance that
 *    yields no instructions is skipped and its fd recorded as -1.
 *
 * Returns 0 on success, a negative errno / LIBBPF_ERRNO__* code on
 * failure.  In either case prog->insns is freed at the end, so a
 * program can only be loaded once.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		/* A preprocessor must have set up instances beforehand. */
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* Without a preprocessor exactly one instance is expected. */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* An empty result means: skip loading this instance. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded into the kernel. */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1665
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001666static bool bpf_program__is_function_storage(struct bpf_program *prog,
1667 struct bpf_object *obj)
1668{
1669 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1670}
1671
Wang Nan55cffde2015-07-01 02:14:07 +00001672static int
1673bpf_object__load_progs(struct bpf_object *obj)
1674{
1675 size_t i;
1676 int err;
1677
1678 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001679 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001680 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001681 err = bpf_program__load(&obj->programs[i],
1682 obj->license,
1683 obj->kern_version);
1684 if (err)
1685 return err;
1686 }
1687 return 0;
1688}
1689
/*
 * Whether a program of @type must carry a kernel version number to be
 * accepted by the kernel at load time.  Historically only kprobes (and
 * unknown/default types, conservatively) require it; every explicitly
 * listed type here does not.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		/* Unknown types fall through to "needs kversion". */
		return true;
	}
}
1721
1722static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1723{
1724 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001725 pr_warning("%s doesn't provide kernel version\n",
1726 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001727 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001728 }
1729 return 0;
1730}
1731
/*
 * Common open path for file- and buffer-backed objects: allocate the
 * bpf_object, parse its ELF (sections, relocations), validate, then
 * release the ELF handles (they are only needed during parsing).
 *
 * Returns the new object or ERR_PTR(-LIBBPF_ERRNO__*) on failure; on
 * any error after allocation the object is closed before returning.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR jumps to 'out' with err set on failure. */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is fully consumed; drop libelf state early. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1760
John Fastabendc034a172018-10-15 11:19:55 -07001761struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1762 int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001763{
1764 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001765 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001766 return NULL;
1767
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001768 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001769
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07001770 return __bpf_object__open(attr->file, NULL, 0,
John Fastabendc034a172018-10-15 11:19:55 -07001771 bpf_prog_type__needs_kver(attr->prog_type),
1772 flags);
1773}
1774
/* Public wrapper: open with attributes and no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1779
1780struct bpf_object *bpf_object__open(const char *path)
1781{
1782 struct bpf_object_open_attr attr = {
1783 .file = path,
1784 .prog_type = BPF_PROG_TYPE_UNSPEC,
1785 };
1786
1787 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00001788}
1789
/*
 * Open a BPF object from an in-memory ELF image rather than a file.
 * If @name is NULL, a name is synthesized from the buffer address and
 * size so the object is still identifiable in logs.
 *
 * NOTE(review): the final argument is the int 'flags' parameter of
 * __bpf_object__open() but is passed the boolean literal 'true' (== 1).
 * Presumably this deliberately enables flag bit 0 for buffer-based
 * callers — confirm against the flag definitions / callers (e.g. perf).
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation; obj_buf_sz is unsigned, so <= 0 means == 0 */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1812
Wang Nan52d33522015-07-01 02:14:04 +00001813int bpf_object__unload(struct bpf_object *obj)
1814{
1815 size_t i;
1816
1817 if (!obj)
1818 return -EINVAL;
1819
Wang Nan9d759a92015-11-27 08:47:35 +00001820 for (i = 0; i < obj->nr_maps; i++)
1821 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001822
Wang Nan55cffde2015-07-01 02:14:07 +00001823 for (i = 0; i < obj->nr_programs; i++)
1824 bpf_program__unload(&obj->programs[i]);
1825
Wang Nan52d33522015-07-01 02:14:04 +00001826 return 0;
1827}
1828
/*
 * Load @obj into the kernel: probe kernel capabilities, create maps,
 * apply relocations, then load programs.  An object may only be
 * loaded once; on any failure everything loaded so far is unloaded.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* Set before attempting, so a failed load is also "loaded". */
	obj->loaded = true;

	/* CHECK_ERR jumps to 'out' with err set on failure. */
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1854
Joe Stringerf3675402017-01-26 13:19:56 -08001855static int check_path(const char *path)
1856{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001857 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001858 struct statfs st_fs;
1859 char *dname, *dir;
1860 int err = 0;
1861
1862 if (path == NULL)
1863 return -EINVAL;
1864
1865 dname = strdup(path);
1866 if (dname == NULL)
1867 return -ENOMEM;
1868
1869 dir = dirname(dname);
1870 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001871 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001872 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001873 err = -errno;
1874 }
1875 free(dname);
1876
1877 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1878 pr_warning("specified path %s is not on BPF FS\n", path);
1879 err = -EINVAL;
1880 }
1881
1882 return err;
1883}
1884
1885int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1886 int instance)
1887{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001888 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001889 int err;
1890
1891 err = check_path(path);
1892 if (err)
1893 return err;
1894
1895 if (prog == NULL) {
1896 pr_warning("invalid program pointer\n");
1897 return -EINVAL;
1898 }
1899
1900 if (instance < 0 || instance >= prog->instances.nr) {
1901 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1902 instance, prog->section_name, prog->instances.nr);
1903 return -EINVAL;
1904 }
1905
1906 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001907 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001908 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08001909 return -errno;
1910 }
1911 pr_debug("pinned program '%s'\n", path);
1912
1913 return 0;
1914}
1915
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08001916int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1917 int instance)
1918{
1919 int err;
1920
1921 err = check_path(path);
1922 if (err)
1923 return err;
1924
1925 if (prog == NULL) {
1926 pr_warning("invalid program pointer\n");
1927 return -EINVAL;
1928 }
1929
1930 if (instance < 0 || instance >= prog->instances.nr) {
1931 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1932 instance, prog->section_name, prog->instances.nr);
1933 return -EINVAL;
1934 }
1935
1936 err = unlink(path);
1937 if (err != 0)
1938 return -errno;
1939 pr_debug("unpinned program '%s'\n", path);
1940
1941 return 0;
1942}
1943
Joe Stringerf3675402017-01-26 13:19:56 -08001944static int make_dir(const char *path)
1945{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001946 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08001947 int err = 0;
1948
1949 if (mkdir(path, 0700) && errno != EEXIST)
1950 err = -errno;
1951
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001952 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07001953 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02001954 pr_warning("failed to mkdir %s: %s\n", path, cp);
1955 }
Joe Stringerf3675402017-01-26 13:19:56 -08001956 return err;
1957}
1958
/*
 * Pin all instances of @prog under @path.  A single-instance program
 * is pinned directly at @path; a multi-instance one gets a directory
 * @path containing one pin per instance, named by index.
 *
 * On failure every instance pinned so far is unpinned (reverse order)
 * and the directory removed, so no partial state is left behind.
 * Returns 0 or a negative error code.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* Roll back: unpin instances [0, i) in reverse order. */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
2025
2026int bpf_program__unpin(struct bpf_program *prog, const char *path)
2027{
2028 int i, err;
2029
2030 err = check_path(path);
2031 if (err)
2032 return err;
2033
2034 if (prog == NULL) {
2035 pr_warning("invalid program pointer\n");
2036 return -EINVAL;
2037 }
2038
2039 if (prog->instances.nr <= 0) {
2040 pr_warning("no instances of prog %s to pin\n",
2041 prog->section_name);
2042 return -EINVAL;
2043 }
2044
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08002045 if (prog->instances.nr == 1) {
2046 /* don't create subdirs when pinning single instance */
2047 return bpf_program__unpin_instance(prog, path, 0);
2048 }
2049
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002050 for (i = 0; i < prog->instances.nr; i++) {
2051 char buf[PATH_MAX];
2052 int len;
2053
2054 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08002055 if (len < 0)
2056 return -EINVAL;
2057 else if (len >= PATH_MAX)
2058 return -ENAMETOOLONG;
2059
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002060 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08002061 if (err)
2062 return err;
2063 }
2064
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002065 err = rmdir(path);
2066 if (err)
2067 return -errno;
2068
Joe Stringerf3675402017-01-26 13:19:56 -08002069 return 0;
2070}
2071
Joe Stringerb6989f32017-01-26 13:19:57 -08002072int bpf_map__pin(struct bpf_map *map, const char *path)
2073{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002074 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08002075 int err;
2076
2077 err = check_path(path);
2078 if (err)
2079 return err;
2080
2081 if (map == NULL) {
2082 pr_warning("invalid map pointer\n");
2083 return -EINVAL;
2084 }
2085
2086 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07002087 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002088 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08002089 return -errno;
2090 }
2091
2092 pr_debug("pinned map '%s'\n", path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002093
Joe Stringerb6989f32017-01-26 13:19:57 -08002094 return 0;
2095}
2096
/*
 * Remove the pin of @map at @path.  Returns 0 on success, -EINVAL for
 * bad arguments, or -errno from unlink().
 */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err = check_path(path);

	if (err)
		return err;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path))
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
2117
/*
 * Pin every map of @obj under directory @path, each pin named after
 * the map.  The object must already be loaded (fds exist only then).
 *
 * On failure all maps pinned so far are unpinned by walking backwards
 * with bpf_map__prev() from the failing map.  Returns 0 or a negative
 * error code.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* Roll back: unpin every map before the failing one. */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2173
2174int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2175{
2176 struct bpf_map *map;
2177 int err;
2178
2179 if (!obj)
2180 return -ENOENT;
2181
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08002182 bpf_object__for_each_map(map, obj) {
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002183 char buf[PATH_MAX];
2184 int len;
2185
2186 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2187 bpf_map__name(map));
Joe Stringerd5148d82017-01-26 13:19:58 -08002188 if (len < 0)
2189 return -EINVAL;
2190 else if (len >= PATH_MAX)
2191 return -ENAMETOOLONG;
2192
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002193 err = bpf_map__unpin(map, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002194 if (err)
2195 return err;
2196 }
2197
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002198 return 0;
2199}
2200
/*
 * Pin every program of @obj under directory @path, each pin named by
 * the program's pin_name.  The object must already be loaded.
 *
 * On failure all programs pinned so far are unpinned by walking
 * backwards with bpf_program__prev() from the failing program.
 * Returns 0 or a negative error code.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* Roll back: unpin every program before the failing one. */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2256
2257int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2258{
2259 struct bpf_program *prog;
2260 int err;
2261
2262 if (!obj)
2263 return -ENOENT;
2264
Joe Stringerd5148d82017-01-26 13:19:58 -08002265 bpf_object__for_each_program(prog, obj) {
2266 char buf[PATH_MAX];
2267 int len;
2268
2269 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08002270 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08002271 if (len < 0)
2272 return -EINVAL;
2273 else if (len >= PATH_MAX)
2274 return -ENAMETOOLONG;
2275
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002276 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002277 if (err)
2278 return err;
2279 }
2280
2281 return 0;
2282}
2283
/*
 * Pin maps and then programs of @obj under @path.  If pinning the
 * programs fails, the already-pinned maps are unpinned again so the
 * operation is all-or-nothing.  Returns 0 or a negative error code.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err = bpf_object__pin_maps(obj, path);

	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		/* Roll back the maps pinned above. */
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}
2300
/*
 * Destroy @obj: run the user's clear_priv callback, drop ELF state,
 * unload kernel resources (map fds, programs), free BTF data, release
 * per-map names and private data, free all programs, then unlink the
 * object from the global list and free it.  NULL is a no-op.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002336
Wang Nan9a208ef2015-07-01 02:14:10 +00002337struct bpf_object *
2338bpf_object__next(struct bpf_object *prev)
2339{
2340 struct bpf_object *next;
2341
2342 if (!prev)
2343 next = list_first_entry(&bpf_objects_list,
2344 struct bpf_object,
2345 list);
2346 else
2347 next = list_next_entry(prev, list);
2348
2349 /* Empty list is noticed here so don't need checking on entry. */
2350 if (&next->list == &bpf_objects_list)
2351 return NULL;
2352
2353 return next;
2354}
2355
/* Return the object's name (its path), or ERR_PTR(-EINVAL) for NULL. */
const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}
2360
/* Return the kernel version recorded in the object, or 0 for NULL. */
unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}
2365
/* Return the object's BTF handle (may be NULL if no BTF was loaded). */
struct btf *bpf_object__btf(struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}
2370
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002371int bpf_object__btf_fd(const struct bpf_object *obj)
2372{
2373 return obj->btf ? btf__fd(obj->btf) : -1;
2374}
2375
Wang Nan10931d22016-11-26 07:03:26 +00002376int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2377 bpf_object_clear_priv_t clear_priv)
2378{
2379 if (obj->priv && obj->clear_priv)
2380 obj->clear_priv(obj, obj->priv);
2381
2382 obj->priv = priv;
2383 obj->clear_priv = clear_priv;
2384 return 0;
2385}
2386
/* Return the user private data of @obj, or ERR_PTR(-EINVAL) for NULL. */
void *bpf_object__priv(struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}
2391
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002392static struct bpf_program *
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002393__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002394{
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002395 size_t nr_programs = obj->nr_programs;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002396 ssize_t idx;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002397
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002398 if (!nr_programs)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002399 return NULL;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002400
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002401 if (!p)
2402 /* Iter from the beginning */
2403 return forward ? &obj->programs[0] :
2404 &obj->programs[nr_programs - 1];
2405
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002406 if (p->obj != obj) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002407 pr_warning("error: program handler doesn't match object\n");
2408 return NULL;
2409 }
2410
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002411 idx = (p - obj->programs) + (forward ? 1 : -1);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002412 if (idx >= obj->nr_programs || idx < 0)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002413 return NULL;
2414 return &obj->programs[idx];
2415}
2416
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002417struct bpf_program *
2418bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2419{
2420 struct bpf_program *prog = prev;
2421
2422 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002423 prog = __bpf_program__iter(prog, obj, true);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002424 } while (prog && bpf_program__is_function_storage(prog, obj));
2425
2426 return prog;
2427}
2428
2429struct bpf_program *
2430bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2431{
2432 struct bpf_program *prog = next;
2433
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002434 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002435 prog = __bpf_program__iter(prog, obj, false);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002436 } while (prog && bpf_program__is_function_storage(prog, obj));
2437
2438 return prog;
2439}
2440
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002441int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2442 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002443{
2444 if (prog->priv && prog->clear_priv)
2445 prog->clear_priv(prog, prog->priv);
2446
2447 prog->priv = priv;
2448 prog->clear_priv = clear_priv;
2449 return 0;
2450}
2451
/* Return the user private data of @prog, or ERR_PTR(-EINVAL) for NULL. */
void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}
2456
/* Set the network interface index used for offloaded program loads. */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
2461
Namhyung Kim715f8db2015-11-03 20:21:05 +09002462const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002463{
2464 const char *title;
2465
2466 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09002467 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002468 title = strdup(title);
2469 if (!title) {
2470 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00002471 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002472 }
2473 }
2474
2475 return title;
2476}
2477
/* Return the fd of the program's first (usually only) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2482
2483int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2484 bpf_program_prep_t prep)
2485{
2486 int *instances_fds;
2487
2488 if (nr_instances <= 0 || !prep)
2489 return -EINVAL;
2490
2491 if (prog->instances.nr > 0 || prog->instances.fds) {
2492 pr_warning("Can't set pre-processor after loading\n");
2493 return -EINVAL;
2494 }
2495
2496 instances_fds = malloc(sizeof(int) * nr_instances);
2497 if (!instances_fds) {
2498 pr_warning("alloc memory failed for fds\n");
2499 return -ENOMEM;
2500 }
2501
2502 /* fill all fd with -1 */
2503 memset(instances_fds, -1, sizeof(int) * nr_instances);
2504
2505 prog->instances.nr = nr_instances;
2506 prog->instances.fds = instances_fds;
2507 prog->preprocessor = prep;
2508 return 0;
2509}
2510
2511int bpf_program__nth_fd(struct bpf_program *prog, int n)
2512{
2513 int fd;
2514
Jakub Kicinski1e960042018-07-26 14:32:18 -07002515 if (!prog)
2516 return -EINVAL;
2517
Wang Nanb5805632015-11-16 12:10:09 +00002518 if (n >= prog->instances.nr || n < 0) {
2519 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2520 n, prog->section_name, prog->instances.nr);
2521 return -EINVAL;
2522 }
2523
2524 fd = prog->instances.fds[n];
2525 if (fd < 0) {
2526 pr_warning("%dth instance of program '%s' is invalid\n",
2527 n, prog->section_name);
2528 return -ENOENT;
2529 }
2530
2531 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002532}
Wang Nan9d759a92015-11-27 08:47:35 +00002533
/* Set the BPF program type used when loading @prog. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2538
Wang Nan5f44e4c82016-07-13 10:44:01 +00002539static bool bpf_program__is_type(struct bpf_program *prog,
2540 enum bpf_prog_type type)
2541{
2542 return prog ? (prog->type == type) : false;
2543}
2544
/*
 * Generate the public per-type helper pair for a program type:
 *   bpf_program__set_<NAME>() — set the program's type (EINVAL on NULL)
 *   bpf_program__is_<NAME>()  — test the program's type
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002567
/* Set the expected attach type passed to the kernel at load time.
 * Caller must pass a non-NULL @prog; no validation is done here.
 */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2573
/* Helpers to build one entry of the section_names[] table below.
 * The section length is computed at compile time from the literal.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07002592
Roman Gushchin583c9002017-12-13 15:18:51 +00002593static const struct {
2594 const char *sec;
2595 size_t len;
2596 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002597 enum bpf_attach_type expected_attach_type;
Andrey Ignatov36153532018-10-31 12:57:18 -07002598 int is_attachable;
Andrey Ignatov956b6202018-09-26 15:24:53 -07002599 enum bpf_attach_type attach_type;
Roman Gushchin583c9002017-12-13 15:18:51 +00002600} section_names[] = {
Andrey Ignatov956b6202018-09-26 15:24:53 -07002601 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
2602 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
2603 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
2604 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
2605 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
2606 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
2607 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
2608 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
2609 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
2610 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
2611 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
2612 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
2613 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
Andrey Ignatovbafa7af2018-09-26 15:24:54 -07002614 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
2615 BPF_CGROUP_INET_INGRESS),
2616 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
2617 BPF_CGROUP_INET_EGRESS),
Andrey Ignatov956b6202018-09-26 15:24:53 -07002618 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
2619 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
2620 BPF_CGROUP_INET_SOCK_CREATE),
2621 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
2622 BPF_CGROUP_INET4_POST_BIND),
2623 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
2624 BPF_CGROUP_INET6_POST_BIND),
2625 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
2626 BPF_CGROUP_DEVICE),
2627 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
2628 BPF_CGROUP_SOCK_OPS),
Andrey Ignatovc6f68512018-09-26 15:24:55 -07002629 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
2630 BPF_SK_SKB_STREAM_PARSER),
2631 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
2632 BPF_SK_SKB_STREAM_VERDICT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07002633 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
2634 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
2635 BPF_SK_MSG_VERDICT),
2636 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
2637 BPF_LIRC_MODE2),
2638 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
2639 BPF_FLOW_DISSECTOR),
2640 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2641 BPF_CGROUP_INET4_BIND),
2642 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2643 BPF_CGROUP_INET6_BIND),
2644 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2645 BPF_CGROUP_INET4_CONNECT),
2646 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2647 BPF_CGROUP_INET6_CONNECT),
2648 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2649 BPF_CGROUP_UDP4_SENDMSG),
2650 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2651 BPF_CGROUP_UDP6_SENDMSG),
Roman Gushchin583c9002017-12-13 15:18:51 +00002652};
Roman Gushchin583c9002017-12-13 15:18:51 +00002653
Andrey Ignatov956b6202018-09-26 15:24:53 -07002654#undef BPF_PROG_SEC_IMPL
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002655#undef BPF_PROG_SEC
Andrey Ignatov956b6202018-09-26 15:24:53 -07002656#undef BPF_APROG_SEC
2657#undef BPF_EAPROG_SEC
2658#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002659
Taeung Songc76e4c22019-01-21 22:06:38 +09002660#define MAX_TYPE_NAME_SIZE 32
2661
2662static char *libbpf_get_type_names(bool attach_type)
2663{
2664 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
2665 char *buf;
2666
2667 buf = malloc(len);
2668 if (!buf)
2669 return NULL;
2670
2671 buf[0] = '\0';
2672 /* Forge string buf with all available names */
2673 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2674 if (attach_type && !section_names[i].is_attachable)
2675 continue;
2676
2677 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
2678 free(buf);
2679 return NULL;
2680 }
2681 strcat(buf, " ");
2682 strcat(buf, section_names[i].sec);
2683 }
2684
2685 return buf;
2686}
2687
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002688int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2689 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00002690{
Taeung Songc76e4c22019-01-21 22:06:38 +09002691 char *type_names;
Roman Gushchin583c9002017-12-13 15:18:51 +00002692 int i;
2693
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002694 if (!name)
2695 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00002696
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002697 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2698 if (strncmp(name, section_names[i].sec, section_names[i].len))
2699 continue;
2700 *prog_type = section_names[i].prog_type;
2701 *expected_attach_type = section_names[i].expected_attach_type;
2702 return 0;
2703 }
Taeung Songc76e4c22019-01-21 22:06:38 +09002704 pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
2705 type_names = libbpf_get_type_names(false);
2706 if (type_names != NULL) {
2707 pr_info("supported section(type) names are:%s\n", type_names);
2708 free(type_names);
2709 }
2710
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002711 return -EINVAL;
2712}
Roman Gushchin583c9002017-12-13 15:18:51 +00002713
Andrey Ignatov956b6202018-09-26 15:24:53 -07002714int libbpf_attach_type_by_name(const char *name,
2715 enum bpf_attach_type *attach_type)
2716{
Taeung Songc76e4c22019-01-21 22:06:38 +09002717 char *type_names;
Andrey Ignatov956b6202018-09-26 15:24:53 -07002718 int i;
2719
2720 if (!name)
2721 return -EINVAL;
2722
2723 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2724 if (strncmp(name, section_names[i].sec, section_names[i].len))
2725 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07002726 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07002727 return -EINVAL;
2728 *attach_type = section_names[i].attach_type;
2729 return 0;
2730 }
Taeung Songc76e4c22019-01-21 22:06:38 +09002731 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
2732 type_names = libbpf_get_type_names(true);
2733 if (type_names != NULL) {
2734 pr_info("attachable section(type) names are:%s\n", type_names);
2735 free(type_names);
2736 }
2737
Andrey Ignatov956b6202018-09-26 15:24:53 -07002738 return -EINVAL;
2739}
2740
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07002741static int
2742bpf_program__identify_section(struct bpf_program *prog,
2743 enum bpf_prog_type *prog_type,
2744 enum bpf_attach_type *expected_attach_type)
2745{
2746 return libbpf_prog_type_by_name(prog->section_name, prog_type,
2747 expected_attach_type);
Roman Gushchin583c9002017-12-13 15:18:51 +00002748}
2749
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002750int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002751{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03002752 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002753}
2754
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002755const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002756{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002757 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002758}
2759
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002760const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002761{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002762 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002763}
2764
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002765__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002766{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002767 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002768}
2769
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07002770__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002771{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002772 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002773}
2774
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002775int bpf_map__set_priv(struct bpf_map *map, void *priv,
2776 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002777{
2778 if (!map)
2779 return -EINVAL;
2780
2781 if (map->priv) {
2782 if (map->clear_priv)
2783 map->clear_priv(map, map->priv);
2784 }
2785
2786 map->priv = priv;
2787 map->clear_priv = clear_priv;
2788 return 0;
2789}
2790
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002791void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002792{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002793 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002794}
2795
/* A PERF_EVENT_ARRAY map is treated as offload-neutral: callers such as
 * bpf_prog_load_xattr() skip setting map_ifindex on it.  Note: @map must
 * be non-NULL; no check is done here.
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2800
/* Record the network interface index for @map (used when the map is
 * created for device offload).  @map must be non-NULL.
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2805
/* Set the FD of the inner map used as the value template for a
 * map-in-map (e.g. array-of-maps / hash-of-maps).  Fails with -EINVAL
 * if @map is not a map-in-map type or an inner FD was already set
 * (-1 is the "unset" sentinel).
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}
2819
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002820static struct bpf_map *
2821__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00002822{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002823 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00002824 struct bpf_map *s, *e;
2825
2826 if (!obj || !obj->maps)
2827 return NULL;
2828
2829 s = obj->maps;
2830 e = obj->maps + obj->nr_maps;
2831
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002832 if ((m < s) || (m >= e)) {
Wang Nan9d759a92015-11-27 08:47:35 +00002833 pr_warning("error in %s: map handler doesn't belong to object\n",
2834 __func__);
2835 return NULL;
2836 }
2837
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002838 idx = (m - obj->maps) + i;
2839 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00002840 return NULL;
2841 return &obj->maps[idx];
2842}
Wang Nan561bbcc2015-11-27 08:47:36 +00002843
2844struct bpf_map *
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002845bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2846{
2847 if (prev == NULL)
2848 return obj->maps;
2849
2850 return __bpf_map__iter(prev, obj, 1);
2851}
2852
2853struct bpf_map *
2854bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2855{
2856 if (next == NULL) {
2857 if (!obj->nr_maps)
2858 return NULL;
2859 return obj->maps + obj->nr_maps - 1;
2860 }
2861
2862 return __bpf_map__iter(next, obj, -1);
2863}
2864
2865struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002866bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002867{
2868 struct bpf_map *pos;
2869
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08002870 bpf_object__for_each_map(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002871 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002872 return pos;
2873 }
2874 return NULL;
2875}
Wang Nan5a6acad2016-11-26 07:03:27 +00002876
/* Convenience: look up a map by name and return its FD, or a negative
 * error (-EINVAL from bpf_map__fd() when the map is not found).
 */
int
bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
2882
Wang Nan5a6acad2016-11-26 07:03:27 +00002883struct bpf_map *
2884bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2885{
2886 int i;
2887
2888 for (i = 0; i < obj->nr_maps; i++) {
2889 if (obj->maps[i].offset == offset)
2890 return &obj->maps[i];
2891 }
2892 return ERR_PTR(-ENOENT);
2893}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002894
/* Decode an ERR_PTR-style pointer: the negative errno it encodes, or 0
 * for a regular (non-error) pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002901
2902int bpf_prog_load(const char *file, enum bpf_prog_type type,
2903 struct bpf_object **pobj, int *prog_fd)
2904{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002905 struct bpf_prog_load_attr attr;
2906
2907 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2908 attr.file = file;
2909 attr.prog_type = type;
2910 attr.expected_attach_type = 0;
2911
2912 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2913}
2914
/*
 * Open, configure, and load the BPF object described by @attr.
 * On success, *pobj is the loaded object and *prog_fd is the FD of the
 * first program found in it.
 *
 * NOTE(review): failures are collapsed to -ENOENT/-EINVAL; the real
 * error from open/load is discarded — consider propagating it.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {
		.file = attr->file,
		.prog_type = attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		prog->log_level = attr->log_level;
		/* remember the first program: its FD is returned */
		if (!first_prog)
			first_prog = prog;
	}

	/* offload-neutral maps (perf event arrays) stay on the host */
	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002985
/*
 * Drain one perf event mmap ring and hand each record to @fn.
 *
 * @mmap_mem: base of the perf mmap region (metadata page + data pages)
 * @mmap_size: size of the data area; the masking below assumes it is a
 *             power of two
 * @page_size: size of the leading metadata page
 * @copy_mem/@copy_size: caller-owned scratch buffer, grown on demand to
 *             linearize records that wrap around the ring boundary
 * @fn: callback; iteration stops when it returns != LIBBPF_PERF_EVENT_CONT
 *
 * head is read once via ring_buffer_read_head() (acquire semantics) and
 * tail is published back via ring_buffer_write_tail() (release) after
 * consuming — the load/store ordering here is part of the kernel ABI,
 * so the statement order must not be rearranged.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		/* tail is a free-running counter; mask maps it into the ring */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* record wraps past the end of the ring: linearize it
		 * into the scratch buffer before handing it to @fn
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* tell the kernel how far we consumed (release store) */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
Song Liu34be16462019-03-11 22:30:38 -07003033
/* Describes one variable-length array inside struct bpf_prog_info by
 * the byte offsets of its pointer, count, and (optional) record size
 * fields, so the arrays can be handled generically by offset.
 */
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};
3041
/* One descriptor per BPF_PROG_INFO_* array.  Negative size_offset
 * values encode a fixed per-record size instead of a field offset.
 */
static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};
3090
3091static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
3092{
3093 __u32 *array = (__u32 *)info;
3094
3095 if (offset >= 0)
3096 return array[offset / sizeof(__u32)];
3097 return -(int)offset;
3098}
3099
3100static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
3101{
3102 __u64 *array = (__u64 *)info;
3103
3104 if (offset >= 0)
3105 return array[offset / sizeof(__u64)];
3106 return -(int)offset;
3107}
3108
3109static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
3110 __u32 val)
3111{
3112 __u32 *array = (__u32 *)info;
3113
3114 if (offset >= 0)
3115 array[offset / sizeof(__u32)] = val;
3116}
3117
3118static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
3119 __u64 val)
3120{
3121 __u64 *array = (__u64 *)info;
3122
3123 if (offset >= 0)
3124 array[offset / sizeof(__u64)] = val;
3125}
3126
3127struct bpf_prog_info_linear *
3128bpf_program__get_prog_info_linear(int fd, __u64 arrays)
3129{
3130 struct bpf_prog_info_linear *info_linear;
3131 struct bpf_prog_info info = {};
3132 __u32 info_len = sizeof(info);
3133 __u32 data_len = 0;
3134 int i, err;
3135 void *ptr;
3136
3137 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
3138 return ERR_PTR(-EINVAL);
3139
3140 /* step 1: get array dimensions */
3141 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
3142 if (err) {
3143 pr_debug("can't get prog info: %s", strerror(errno));
3144 return ERR_PTR(-EFAULT);
3145 }
3146
3147 /* step 2: calculate total size of all arrays */
3148 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3149 bool include_array = (arrays & (1UL << i)) > 0;
3150 struct bpf_prog_info_array_desc *desc;
3151 __u32 count, size;
3152
3153 desc = bpf_prog_info_array_desc + i;
3154
3155 /* kernel is too old to support this field */
3156 if (info_len < desc->array_offset + sizeof(__u32) ||
3157 info_len < desc->count_offset + sizeof(__u32) ||
3158 (desc->size_offset > 0 && info_len < desc->size_offset))
3159 include_array = false;
3160
3161 if (!include_array) {
3162 arrays &= ~(1UL << i); /* clear the bit */
3163 continue;
3164 }
3165
3166 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3167 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3168
3169 data_len += count * size;
3170 }
3171
3172 /* step 3: allocate continuous memory */
3173 data_len = roundup(data_len, sizeof(__u64));
3174 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
3175 if (!info_linear)
3176 return ERR_PTR(-ENOMEM);
3177
3178 /* step 4: fill data to info_linear->info */
3179 info_linear->arrays = arrays;
3180 memset(&info_linear->info, 0, sizeof(info));
3181 ptr = info_linear->data;
3182
3183 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3184 struct bpf_prog_info_array_desc *desc;
3185 __u32 count, size;
3186
3187 if ((arrays & (1UL << i)) == 0)
3188 continue;
3189
3190 desc = bpf_prog_info_array_desc + i;
3191 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3192 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3193 bpf_prog_info_set_offset_u32(&info_linear->info,
3194 desc->count_offset, count);
3195 bpf_prog_info_set_offset_u32(&info_linear->info,
3196 desc->size_offset, size);
3197 bpf_prog_info_set_offset_u64(&info_linear->info,
3198 desc->array_offset,
3199 ptr_to_u64(ptr));
3200 ptr += count * size;
3201 }
3202
3203 /* step 5: call syscall again to get required arrays */
3204 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
3205 if (err) {
3206 pr_debug("can't get prog info: %s", strerror(errno));
3207 free(info_linear);
3208 return ERR_PTR(-EFAULT);
3209 }
3210
3211 /* step 6: verify the data */
3212 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3213 struct bpf_prog_info_array_desc *desc;
3214 __u32 v1, v2;
3215
3216 if ((arrays & (1UL << i)) == 0)
3217 continue;
3218
3219 desc = bpf_prog_info_array_desc + i;
3220 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3221 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
3222 desc->count_offset);
3223 if (v1 != v2)
3224 pr_warning("%s: mismatch in element count\n", __func__);
3225
3226 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3227 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
3228 desc->size_offset);
3229 if (v1 != v2)
3230 pr_warning("%s: mismatch in rec size\n", __func__);
3231 }
3232
3233 /* step 7: update info_len and data_len */
3234 info_linear->info_len = sizeof(struct bpf_prog_info);
3235 info_linear->data_len = data_len;
3236
3237 return info_linear;
3238}
3239
3240void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
3241{
3242 int i;
3243
3244 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3245 struct bpf_prog_info_array_desc *desc;
3246 __u64 addr, offs;
3247
3248 if ((info_linear->arrays & (1UL << i)) == 0)
3249 continue;
3250
3251 desc = bpf_prog_info_array_desc + i;
3252 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
3253 desc->array_offset);
3254 offs = addr - ptr_to_u64(info_linear->data);
3255 bpf_prog_info_set_offset_u64(&info_linear->info,
3256 desc->array_offset, offs);
3257 }
3258}
3259
3260void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
3261{
3262 int i;
3263
3264 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3265 struct bpf_prog_info_array_desc *desc;
3266 __u64 addr, offs;
3267
3268 if ((info_linear->arrays & (1UL << i)) == 0)
3269 continue;
3270
3271 desc = bpf_prog_info_array_desc + i;
3272 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
3273 desc->array_offset);
3274 addr = offs + ptr_to_u64(info_linear->data);
3275 bpf_prog_info_set_offset_u64(&info_linear->info,
3276 desc->array_offset, addr);
3277 }
3278}