blob: e2a457e7c318451845ce280240bcf905d6768225 [file] [log] [blame]
Alexei Starovoitov1bc38b82018-10-05 16:40:00 -07001// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
Eric Leblond6061a3d2018-01-30 21:55:03 +01002
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Daniel Borkmannd8599002019-04-09 23:20:13 +020010 * Copyright (C) 2019 Isovalent, Inc.
Wang Nan1b76c132015-07-01 02:13:51 +000011 */
12
Yonghong Songb4269952018-11-29 15:31:45 -080013#ifndef _GNU_SOURCE
Jakub Kicinski531b0142018-07-10 14:43:05 -070014#define _GNU_SOURCE
Yonghong Songb4269952018-11-29 15:31:45 -080015#endif
Wang Nan1b76c132015-07-01 02:13:51 +000016#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000017#include <stdio.h>
18#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080019#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000020#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000021#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000022#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000023#include <fcntl.h>
24#include <errno.h>
Wang Nan1b76c132015-07-01 02:13:51 +000025#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080026#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000027#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000028#include <linux/bpf.h>
Martin KaFai Lau38d5d3b2018-07-24 08:40:22 -070029#include <linux/btf.h>
Stanislav Fomichev47eff612018-11-20 17:11:19 -080030#include <linux/filter.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000031#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080032#include <linux/limits.h>
Yonghong Song438363c2018-10-09 16:14:47 -070033#include <linux/perf_event.h>
Daniel Borkmanna64af0e2018-10-19 15:51:03 +020034#include <linux/ring_buffer.h>
Joe Stringerf3675402017-01-26 13:19:56 -080035#include <sys/stat.h>
36#include <sys/types.h>
37#include <sys/vfs.h>
Jakub Kicinski531b0142018-07-10 14:43:05 -070038#include <tools/libc_compat.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000039#include <libelf.h>
40#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000041
42#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000043#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070044#include "btf.h"
Arnaldo Carvalho de Melo6d419072018-09-14 16:47:14 -030045#include "str_error.h"
Yonghong Song8461ef82019-02-01 16:14:14 -080046#include "libbpf_util.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000047
/* Official ELF machine number for eBPF; provide a fallback for old
 * system headers that predate its addition to <elf.h>.
 */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* Superblock magic of the bpffs pseudo-filesystem (fs/bpf). */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))
62
Stanislav Fomicheva8a1f7d2019-02-04 16:20:55 -080063static int __base_pr(enum libbpf_print_level level, const char *format,
64 va_list args)
Wang Nanb3f59d62015-07-01 02:13:52 +000065{
Yonghong Song6f1ae8b2019-02-01 16:14:17 -080066 if (level == LIBBPF_DEBUG)
67 return 0;
68
Stanislav Fomicheva8a1f7d2019-02-04 16:20:55 -080069 return vfprintf(stderr, format, args);
Wang Nanb3f59d62015-07-01 02:13:52 +000070}
71
/* Currently installed print callback; defaults to __base_pr(). */
static libbpf_print_fn_t __libbpf_pr = __base_pr;

/* Install a user-supplied print callback.  Passing NULL silences all
 * libbpf output (see libbpf_print()).
 */
void libbpf_set_print(libbpf_print_fn_t fn)
{
	__libbpf_pr = fn;
}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000078
/* Central logging entry point: forwards @format/varargs to the
 * installed print callback at the given @level.  No-op when the
 * callback was cleared via libbpf_set_print(NULL).
 */
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}
91
/* Buffer size used for libbpf_strerror_r()-style error strings. */
#define STRERR_BUFSIZE  128

/* Evaluate @action; on a non-zero result store it in @err and jump to
 * the @out cleanup label.
 */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* Free *ptr and NULL it, guarding against double-free/use-after-free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* Close fd if it is valid, reset it to -1, and yield close()'s result
 * (0 when fd was already invalid).
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-backed ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
120
/* Convert a pointer to the __u64 representation used by kernel ABIs
 * (round-trips through unsigned long to match pointer width).
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
125
/* Probed kernel features, cached once per bpf_object. */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};
130
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name of the program (or ".text" for sub-programs). */
	char *name;
	/* Target ifindex for device offload; 0 when not offloaded. */
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	/* Copy of the section's instruction stream. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* Pending relocations collected from the section's .rel entries. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map fd into ld_imm64 */
			RELO_CALL,	/* bpf-to-bpf call target */
			RELO_DATA,	/* global data (.data/.rodata/.bss) */
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;
	/* Verifier log verbosity requested at load time. */
	int log_level;

	/* Loaded program FDs; nr == -1 means "never loaded". */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back-pointer to the owning object. */
	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	/* BTF fd plus .BTF.ext func/line info passed to the kernel. */
	int btf_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};
186
/* Distinguishes user-defined maps from the internal maps libbpf
 * synthesizes to back global data sections.
 */
enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

/* BTF name suffix for each internal map type; indexed by
 * enum libbpf_map_type.
 */
static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};
199
/* One BPF map parsed from the object file (or synthesized for global
 * data sections).
 */
struct bpf_map {
	int fd;
	char *name;
	/* Byte offset of the definition inside the "maps" section. */
	size_t offset;
	/* Target ifindex for device offload; 0 when not offloaded. */
	int map_ifindex;
	/* fd of the inner map template for map-in-map types. */
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
};

/* Heap copies of the .rodata/.data section contents used to populate
 * the corresponding internal maps.
 */
struct bpf_secdata {
	void *rodata;
	void *data;
};

/* Global registry of every live bpf_object (see struct bpf_object.list). */
static LIST_HEAD(bpf_objects_list);
220
/* A parsed BPF ELF object; created by bpf_object__new() and linked into
 * the global bpf_objects_list.
 */
struct bpf_object {
	/* Basename of @path with its first '.' suffix stripped. */
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	/* Copies of .data/.rodata contents backing internal maps. */
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		/* Caller-owned in-memory ELF image, if opened from buffer. */
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		/* Section indices below are -1 when the section is absent. */
		int maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	/* Object file path; flexible array allocated with the struct. */
	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)
279
/* Close every loaded instance of @prog and release per-load metadata
 * (BTF fd, func_info, line_info).  Safe on NULL and on programs that
 * were opened but never loaded.
 */
void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		/* Anything other than -1 or a positive count is a bug. */
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	/* Reset to the "never loaded" state. */
	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zclose(prog->btf_fd);
	zfree(&prog->func_info);
	zfree(&prog->line_info);
}
306
/* Fully tear down @prog: invoke the user's clear_priv callback, unload
 * all instances, and free every owned allocation.  Leaves the struct in
 * an empty, reusable state (idx = -1).
 */
static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	/* Give the user a chance to release their private data first. */
	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
329
Stanislav Fomichev33a2c752018-11-09 08:21:43 -0800330static char *__bpf_program__pin_name(struct bpf_program *prog)
331{
332 char *name, *p;
333
334 name = p = strdup(prog->section_name);
335 while ((p = strchr(p, '/')))
336 *p = '_';
337
338 return name;
339}
340
/* Initialize @prog from raw section contents: duplicate the section
 * name, derive the pin name, and copy the instruction stream.
 *
 * @data/@size: contents of the ELF section holding the program.
 * @section_name: section name (duplicated, not borrowed).
 * @idx: section index, kept for later relocation matching.
 *
 * Returns 0 on success, -EINVAL when the section is too small to hold a
 * single instruction, -ENOMEM on allocation failure (partially built
 * state is released via bpf_program__exit()).
 */
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	/* Any trailing partial instruction is silently truncated. */
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	/* -1 marks "never loaded"; see bpf_program__unload(). */
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->btf_fd = -1;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}
386
/* Parse one program from section @section_name/@idx and append it to
 * obj->programs, growing the array by one.  On reallocation failure the
 * existing obj->programs array is left intact.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* Struct copy: progs[nr_progs] takes ownership of prog's buffers. */
	progs[nr_progs] = prog;
	return 0;
}
422
/* Resolve each program's name from the ELF symbol table by locating the
 * STB_GLOBAL symbol that lives in the program's section.  Programs in
 * .text (bpf-to-bpf call targets) fall back to the literal name
 * ".text".  Fails if any program ends up nameless.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan symbols until one matching this section is found. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
475
/* Allocate and minimally initialize a bpf_object for @path and link it
 * into the global objects list.  The object name is the file's basename
 * truncated at its first '.'.  @obj_buf/@obj_buf_sz, when set, reference
 * a caller-owned in-memory ELF image (see ownership comment below).
 *
 * Returns the new object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;
	char *end;

	/* path is stored in the flexible array member at the tail. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(obj->name, basename((void *)path),
		sizeof(obj->name) - 1);
	end = strchr(obj->name, '.');
	if (end)
		*end = 0;

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	/* -1 == "section not seen yet" for all tracked section indices. */
	obj->efile.maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
517
/* Release all ELF-parsing state: the libelf handle, cached section data
 * pointers, the relocation array, and the file descriptor.  The
 * caller-owned obj_buf is only un-referenced, never freed.
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	/* Elf_Data pointers below were owned by the Elf handle just freed. */
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
538
539static int bpf_object__elf_init(struct bpf_object *obj)
540{
541 int err = 0;
542 GElf_Ehdr *ep;
543
544 if (obj_elf_valid(obj)) {
545 pr_warning("elf init: internal error\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000546 return -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000547 }
548
Wang Nan6c956392015-07-01 02:13:54 +0000549 if (obj->efile.obj_buf_sz > 0) {
550 /*
551 * obj_buf should have been validated by
552 * bpf_object__open_buffer().
553 */
554 obj->efile.elf = elf_memory(obj->efile.obj_buf,
555 obj->efile.obj_buf_sz);
556 } else {
557 obj->efile.fd = open(obj->path, O_RDONLY);
558 if (obj->efile.fd < 0) {
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200559 char errmsg[STRERR_BUFSIZE];
Andrey Ignatov24d6a802018-10-03 15:26:41 -0700560 char *cp = libbpf_strerror_r(errno, errmsg,
561 sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +0200562
563 pr_warning("failed to open %s: %s\n", obj->path, cp);
Wang Nan6c956392015-07-01 02:13:54 +0000564 return -errno;
565 }
566
567 obj->efile.elf = elf_begin(obj->efile.fd,
568 LIBBPF_ELF_C_READ_MMAP,
569 NULL);
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000570 }
571
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000572 if (!obj->efile.elf) {
573 pr_warning("failed to open %s as ELF file\n",
574 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000575 err = -LIBBPF_ERRNO__LIBELF;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000576 goto errout;
577 }
578
579 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
580 pr_warning("failed to get EHDR from %s\n",
581 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000582 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000583 goto errout;
584 }
585 ep = &obj->efile.ehdr;
586
Wang Nan9b161372016-07-18 06:01:08 +0000587 /* Old LLVM set e_machine to EM_NONE */
588 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000589 pr_warning("%s is not an eBPF object file\n",
590 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000591 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000592 goto errout;
593 }
594
595 return 0;
596errout:
597 bpf_object__elf_finish(obj);
598 return err;
599}
600
/* Verify that the object's ELF byte order matches the host's.
 * Returns 0 on match, -LIBBPF_ERRNO__ENDIAN on mismatch or an
 * unrecognized EI_DATA value.
 */
static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static const unsigned int probe = 1;
	/* Host is little-endian iff the low-order byte of 1 comes first. */
	int host_is_le = *(const unsigned char *)&probe == 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		if (!host_is_le)
			goto mismatch;
		return 0;
	case ELFDATA2MSB:
		if (host_is_le)
			goto mismatch;
		return 0;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}
628
Wang Nancb1e5e92015-07-01 02:13:57 +0000629static int
630bpf_object__init_license(struct bpf_object *obj,
631 void *data, size_t size)
632{
633 memcpy(obj->license, data,
634 min(size, sizeof(obj->license) - 1));
635 pr_debug("license of %s is %s\n", obj->path, obj->license);
636 return 0;
637}
638
/* Parse the 4-byte LINUX_VERSION_CODE from the "version" ELF section.
 * Returns 0 on success, -LIBBPF_ERRNO__FORMAT when the section is not
 * exactly sizeof(__u32) bytes.
 */
static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	/* memcpy avoids unaligned-access issues with the section data. */
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}
655
Eric Leblond4708bbd2016-11-15 04:05:47 +0000656static int compare_bpf_map(const void *_a, const void *_b)
657{
658 const struct bpf_map *a = _a;
659 const struct bpf_map *b = _b;
660
661 return a->offset - b->offset;
662}
663
/* True for map types whose values are themselves map fds. */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
671
/* Walk all ELF sections looking for one named @name and report its data
 * size in *d_size.  Returns 0 on success, -ENOENT when no such section
 * exists, -EIO on libelf failures.
 */
static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		/* Section names live in the e_shstrndx string table. */
		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}
715
/* Report the size of section @name in *size.  The well-known global
 * data sections use their cached Elf_Data; anything else triggers a
 * full section scan.  Returns 0 on success; a zero-sized or missing
 * section yields the underlying error (-ENOENT by default).
 */
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	/* NOTE(review): an existing but empty section still reports ret. */
	return *size ? 0 : ret;
}
742
/* Look up global variable @name in the symbol table and store its
 * in-section offset (st_value) in *off.  Only STB_GLOBAL/STT_OBJECT
 * symbols are considered.  Returns 0 on success, -ENOENT when the
 * symbol is absent, -EINVAL/-EIO on bad arguments or libelf failure.
 */
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		/* Only global data objects can be variables. */
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warning("failed to get sym name string for var %s\n",
				   name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}
777
Daniel Borkmannd8599002019-04-09 23:20:13 +0200778static bool bpf_object__has_maps(const struct bpf_object *obj)
779{
780 return obj->efile.maps_shndx >= 0 ||
781 obj->efile.data_shndx >= 0 ||
782 obj->efile.rodata_shndx >= 0 ||
783 obj->efile.bss_shndx >= 0;
784}
785
/* Synthesize the internal single-entry array map backing a global data
 * section (.data/.rodata/.bss).  The map name is the object name
 * truncated to 8 chars plus the section suffix; .rodata maps are made
 * read-only for programs.  When @data_buff is non-NULL the section
 * contents are copied into a fresh buffer stored there.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
			      enum libbpf_map_type type, Elf_Data *data,
			      void **data_buff)
{
	struct bpf_map_def *def = &map->def;
	char map_name[BPF_OBJ_NAME_LEN];

	map->libbpf_type = type;
	/* Internal maps sort after all user maps (see compare_bpf_map). */
	map->offset = ~(typeof(map->offset))0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warning("failed to alloc map name\n");
		return -ENOMEM;
	}

	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ?
			 BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warning("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %ld is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}
823
Eric Leblond4708bbd2016-11-15 04:05:47 +0000824static int
John Fastabendc034a172018-10-15 11:19:55 -0700825bpf_object__init_maps(struct bpf_object *obj, int flags)
Eric Leblond4708bbd2016-11-15 04:05:47 +0000826{
Daniel Borkmannd8599002019-04-09 23:20:13 +0200827 int i, map_idx, map_def_sz, nr_syms, nr_maps = 0, nr_maps_glob = 0;
John Fastabendc034a172018-10-15 11:19:55 -0700828 bool strict = !(flags & MAPS_RELAX_COMPAT);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000829 Elf_Data *symbols = obj->efile.symbols;
Daniel Borkmannd8599002019-04-09 23:20:13 +0200830 Elf_Data *data = NULL;
831 int ret = 0;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000832
Eric Leblond4708bbd2016-11-15 04:05:47 +0000833 if (!symbols)
834 return -EINVAL;
Daniel Borkmannd8599002019-04-09 23:20:13 +0200835 nr_syms = symbols->d_size / sizeof(GElf_Sym);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000836
Daniel Borkmannd8599002019-04-09 23:20:13 +0200837 if (obj->efile.maps_shndx >= 0) {
838 Elf_Scn *scn = elf_getscn(obj->efile.elf,
839 obj->efile.maps_shndx);
840
841 if (scn)
842 data = elf_getdata(scn, NULL);
843 if (!scn || !data) {
844 pr_warning("failed to get Elf_Data from map section %d\n",
845 obj->efile.maps_shndx);
846 return -EINVAL;
847 }
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000848 }
849
Eric Leblond4708bbd2016-11-15 04:05:47 +0000850 /*
851 * Count number of maps. Each map has a name.
852 * Array of maps is not supported: only the first element is
853 * considered.
854 *
855 * TODO: Detect array of map and report error.
856 */
Daniel Borkmannd8599002019-04-09 23:20:13 +0200857 if (obj->efile.data_shndx >= 0)
858 nr_maps_glob++;
859 if (obj->efile.rodata_shndx >= 0)
860 nr_maps_glob++;
861 if (obj->efile.bss_shndx >= 0)
862 nr_maps_glob++;
863 for (i = 0; data && i < nr_syms; i++) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000864 GElf_Sym sym;
865
866 if (!gelf_getsym(symbols, i, &sym))
867 continue;
868 if (sym.st_shndx != obj->efile.maps_shndx)
869 continue;
870 nr_maps++;
871 }
872
873 /* Alloc obj->maps and fill nr_maps. */
874 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
875 nr_maps, data->d_size);
Daniel Borkmannd8599002019-04-09 23:20:13 +0200876 if (!nr_maps && !nr_maps_glob)
Eric Leblond4708bbd2016-11-15 04:05:47 +0000877 return 0;
Wang Nan9d759a92015-11-27 08:47:35 +0000878
Craig Gallekb13c5c12017-10-05 10:41:57 -0400879 /* Assume equally sized map definitions */
Daniel Borkmannd8599002019-04-09 23:20:13 +0200880 if (data) {
881 map_def_sz = data->d_size / nr_maps;
882 if (!data->d_size || (data->d_size % nr_maps) != 0) {
883 pr_warning("unable to determine map definition size "
884 "section %s, %d maps in %zd bytes\n",
885 obj->path, nr_maps, data->d_size);
886 return -EINVAL;
887 }
Craig Gallekb13c5c12017-10-05 10:41:57 -0400888 }
889
Daniel Borkmannd8599002019-04-09 23:20:13 +0200890 nr_maps += nr_maps_glob;
Wang Nan9d759a92015-11-27 08:47:35 +0000891 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
892 if (!obj->maps) {
893 pr_warning("alloc maps for object failed\n");
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000894 return -ENOMEM;
895 }
Wang Nan9d759a92015-11-27 08:47:35 +0000896 obj->nr_maps = nr_maps;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000897
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -0800898 for (i = 0; i < nr_maps; i++) {
899 /*
900 * fill all fd with -1 so won't close incorrect
901 * fd (fd=0 is stdin) when failure (zclose won't close
902 * negative fd)).
903 */
Wang Nan9d759a92015-11-27 08:47:35 +0000904 obj->maps[i].fd = -1;
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -0800905 obj->maps[i].inner_map_fd = -1;
906 }
Wang Nan9d759a92015-11-27 08:47:35 +0000907
Eric Leblond4708bbd2016-11-15 04:05:47 +0000908 /*
909 * Fill obj->maps using data in "maps" section.
910 */
Daniel Borkmannd8599002019-04-09 23:20:13 +0200911 for (i = 0, map_idx = 0; data && i < nr_syms; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +0000912 GElf_Sym sym;
Wang Nan561bbcc2015-11-27 08:47:36 +0000913 const char *map_name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000914 struct bpf_map_def *def;
Wang Nan561bbcc2015-11-27 08:47:36 +0000915
916 if (!gelf_getsym(symbols, i, &sym))
917 continue;
Wang Nan666810e2016-01-25 09:55:49 +0000918 if (sym.st_shndx != obj->efile.maps_shndx)
Wang Nan561bbcc2015-11-27 08:47:36 +0000919 continue;
920
921 map_name = elf_strptr(obj->efile.elf,
Wang Nan77ba9a52015-12-08 02:25:30 +0000922 obj->efile.strtabidx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000923 sym.st_name);
Daniel Borkmannd8599002019-04-09 23:20:13 +0200924
925 obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000926 obj->maps[map_idx].offset = sym.st_value;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400927 if (sym.st_value + map_def_sz > data->d_size) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000928 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
929 obj->path, map_name);
930 return -EINVAL;
Wang Nan561bbcc2015-11-27 08:47:36 +0000931 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000932
Wang Nan561bbcc2015-11-27 08:47:36 +0000933 obj->maps[map_idx].name = strdup(map_name);
Wang Nan973170e2015-12-08 02:25:29 +0000934 if (!obj->maps[map_idx].name) {
935 pr_warning("failed to alloc map name\n");
936 return -ENOMEM;
937 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000938 pr_debug("map %d is \"%s\"\n", map_idx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000939 obj->maps[map_idx].name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000940 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400941 /*
942 * If the definition of the map in the object file fits in
943 * bpf_map_def, copy it. Any extra fields in our version
944 * of bpf_map_def will default to zero as a result of the
945 * calloc above.
946 */
947 if (map_def_sz <= sizeof(struct bpf_map_def)) {
948 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
949 } else {
950 /*
951 * Here the map structure being read is bigger than what
952 * we expect, truncate if the excess bits are all zero.
953 * If they are not zero, reject this map as
954 * incompatible.
955 */
956 char *b;
957 for (b = ((char *)def) + sizeof(struct bpf_map_def);
958 b < ((char *)def) + map_def_sz; b++) {
959 if (*b != 0) {
960 pr_warning("maps section in %s: \"%s\" "
961 "has unrecognized, non-zero "
962 "options\n",
963 obj->path, map_name);
John Fastabendc034a172018-10-15 11:19:55 -0700964 if (strict)
965 return -EINVAL;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400966 }
967 }
968 memcpy(&obj->maps[map_idx].def, def,
969 sizeof(struct bpf_map_def));
970 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000971 map_idx++;
Wang Nan561bbcc2015-11-27 08:47:36 +0000972 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000973
Daniel Borkmannd8599002019-04-09 23:20:13 +0200974 /*
975 * Populate rest of obj->maps with libbpf internal maps.
976 */
977 if (obj->efile.data_shndx >= 0)
978 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
979 LIBBPF_MAP_DATA,
980 obj->efile.data,
981 &obj->sections.data);
982 if (!ret && obj->efile.rodata_shndx >= 0)
983 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
984 LIBBPF_MAP_RODATA,
985 obj->efile.rodata,
986 &obj->sections.rodata);
987 if (!ret && obj->efile.bss_shndx >= 0)
988 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
989 LIBBPF_MAP_BSS,
990 obj->efile.bss, NULL);
991 if (!ret)
992 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
993 compare_bpf_map);
994 return ret;
Wang Nan561bbcc2015-11-27 08:47:36 +0000995}
996
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100997static bool section_have_execinstr(struct bpf_object *obj, int idx)
998{
999 Elf_Scn *scn;
1000 GElf_Shdr sh;
1001
1002 scn = elf_getscn(obj->efile.elf, idx);
1003 if (!scn)
1004 return false;
1005
1006 if (gelf_getshdr(scn, &sh) != &sh)
1007 return false;
1008
1009 if (sh.sh_flags & SHF_EXECINSTR)
1010 return true;
1011
1012 return false;
1013}
1014
John Fastabendc034a172018-10-15 11:19:55 -07001015static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
Wang Nan29603662015-07-01 02:13:56 +00001016{
1017 Elf *elf = obj->efile.elf;
1018 GElf_Ehdr *ep = &obj->efile.ehdr;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001019 Elf_Data *btf_ext_data = NULL;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001020 Elf_Data *btf_data = NULL;
Wang Nan29603662015-07-01 02:13:56 +00001021 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +00001022 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +00001023
1024 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
1025 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
1026 pr_warning("failed to get e_shstrndx from %s\n",
1027 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001028 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001029 }
1030
1031 while ((scn = elf_nextscn(elf, scn)) != NULL) {
1032 char *name;
1033 GElf_Shdr sh;
1034 Elf_Data *data;
1035
1036 idx++;
1037 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001038 pr_warning("failed to get section(%d) header from %s\n",
1039 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001040 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001041 goto out;
1042 }
1043
1044 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1045 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001046 pr_warning("failed to get section(%d) name from %s\n",
1047 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001048 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001049 goto out;
1050 }
1051
1052 data = elf_getdata(scn, 0);
1053 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001054 pr_warning("failed to get section(%d) data from %s(%s)\n",
1055 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001056 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001057 goto out;
1058 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001059 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
1060 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +00001061 (int)sh.sh_link, (unsigned long)sh.sh_flags,
1062 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +00001063
Daniel Borkmann1713d682019-04-09 23:20:14 +02001064 if (strcmp(name, "license") == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001065 err = bpf_object__init_license(obj,
1066 data->d_buf,
1067 data->d_size);
Daniel Borkmann1713d682019-04-09 23:20:14 +02001068 } else if (strcmp(name, "version") == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001069 err = bpf_object__init_kversion(obj,
1070 data->d_buf,
1071 data->d_size);
Daniel Borkmann1713d682019-04-09 23:20:14 +02001072 } else if (strcmp(name, "maps") == 0) {
Wang Nan666810e2016-01-25 09:55:49 +00001073 obj->efile.maps_shndx = idx;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001074 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
1075 btf_data = data;
Yonghong Song2993e052018-11-19 15:29:16 -08001076 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001077 btf_ext_data = data;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001078 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +00001079 if (obj->efile.symbols) {
1080 pr_warning("bpf: multiple SYMTAB in %s\n",
1081 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001082 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00001083 } else {
Wang Nanbec7d682015-07-01 02:13:59 +00001084 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +00001085 obj->efile.strtabidx = sh.sh_link;
1086 }
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001087 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
1088 if (sh.sh_flags & SHF_EXECINSTR) {
1089 if (strcmp(name, ".text") == 0)
1090 obj->efile.text_shndx = idx;
1091 err = bpf_object__add_program(obj, data->d_buf,
1092 data->d_size, name, idx);
1093 if (err) {
1094 char errmsg[STRERR_BUFSIZE];
1095 char *cp = libbpf_strerror_r(-err, errmsg,
1096 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +00001097
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001098 pr_warning("failed to alloc program %s (%s): %s",
1099 name, obj->path, cp);
1100 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001101 } else if (strcmp(name, ".data") == 0) {
1102 obj->efile.data = data;
1103 obj->efile.data_shndx = idx;
1104 } else if (strcmp(name, ".rodata") == 0) {
1105 obj->efile.rodata = data;
1106 obj->efile.rodata_shndx = idx;
1107 } else {
1108 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nana5b8bd42015-07-01 02:14:00 +00001109 }
Wang Nanb62f06e2015-07-01 02:14:01 +00001110 } else if (sh.sh_type == SHT_REL) {
1111 void *reloc = obj->efile.reloc;
1112 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01001113 int sec = sh.sh_info; /* points to other section */
1114
1115 /* Only do relo for section with exec instructions */
1116 if (!section_have_execinstr(obj, sec)) {
1117 pr_debug("skip relo %s(%d) for section(%d)\n",
1118 name, idx, sec);
1119 continue;
1120 }
Wang Nanb62f06e2015-07-01 02:14:01 +00001121
Jakub Kicinski531b0142018-07-10 14:43:05 -07001122 reloc = reallocarray(reloc, nr_reloc,
1123 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +00001124 if (!reloc) {
1125 pr_warning("realloc failed\n");
1126 err = -ENOMEM;
1127 } else {
1128 int n = nr_reloc - 1;
1129
1130 obj->efile.reloc = reloc;
1131 obj->efile.nr_reloc = nr_reloc;
1132
1133 obj->efile.reloc[n].shdr = sh;
1134 obj->efile.reloc[n].data = data;
1135 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001136 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
1137 obj->efile.bss = data;
1138 obj->efile.bss_shndx = idx;
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001139 } else {
1140 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +00001141 }
Wang Nancb1e5e92015-07-01 02:13:57 +00001142 if (err)
1143 goto out;
Wang Nan29603662015-07-01 02:13:56 +00001144 }
Wang Nan561bbcc2015-11-27 08:47:36 +00001145
Wang Nan77ba9a52015-12-08 02:25:30 +00001146 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
1147 pr_warning("Corrupted ELF file: index of strtab invalid\n");
1148 return LIBBPF_ERRNO__FORMAT;
1149 }
Daniel Borkmann1713d682019-04-09 23:20:14 +02001150 if (btf_data) {
1151 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
1152 if (IS_ERR(obj->btf)) {
1153 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
1154 BTF_ELF_SEC, PTR_ERR(obj->btf));
1155 obj->btf = NULL;
1156 } else {
1157 err = btf__finalize_data(obj, obj->btf);
1158 if (!err)
1159 err = btf__load(obj->btf);
1160 if (err) {
1161 pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
1162 BTF_ELF_SEC, err);
1163 btf__free(obj->btf);
1164 obj->btf = NULL;
1165 err = 0;
1166 }
1167 }
1168 }
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001169 if (btf_ext_data) {
1170 if (!obj->btf) {
1171 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
1172 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
1173 } else {
1174 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
Yonghong Song8461ef82019-02-01 16:14:14 -08001175 btf_ext_data->d_size);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001176 if (IS_ERR(obj->btf_ext)) {
1177 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
1178 BTF_EXT_ELF_SEC,
1179 PTR_ERR(obj->btf_ext));
1180 obj->btf_ext = NULL;
1181 }
1182 }
1183 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001184 if (bpf_object__has_maps(obj)) {
John Fastabendc034a172018-10-15 11:19:55 -07001185 err = bpf_object__init_maps(obj, flags);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -07001186 if (err)
1187 goto out;
1188 }
1189 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +00001190out:
1191 return err;
1192}
1193
Wang Nan34090912015-07-01 02:14:02 +00001194static struct bpf_program *
1195bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
1196{
1197 struct bpf_program *prog;
1198 size_t i;
1199
1200 for (i = 0; i < obj->nr_programs; i++) {
1201 prog = &obj->programs[i];
1202 if (prog->idx == idx)
1203 return prog;
1204 }
1205 return NULL;
1206}
1207
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07001208struct bpf_program *
1209bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
1210{
1211 struct bpf_program *pos;
1212
1213 bpf_object__for_each_program(pos, obj) {
1214 if (pos->section_name && !strcmp(pos->section_name, title))
1215 return pos;
1216 }
1217 return NULL;
1218}
1219
Daniel Borkmannd8599002019-04-09 23:20:13 +02001220static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
1221 int shndx)
1222{
1223 return shndx == obj->efile.data_shndx ||
1224 shndx == obj->efile.bss_shndx ||
1225 shndx == obj->efile.rodata_shndx;
1226}
1227
1228static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
1229 int shndx)
1230{
1231 return shndx == obj->efile.maps_shndx;
1232}
1233
1234static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
1235 int shndx)
1236{
1237 return shndx == obj->efile.text_shndx ||
1238 bpf_object__shndx_is_maps(obj, shndx) ||
1239 bpf_object__shndx_is_data(obj, shndx);
1240}
1241
1242static enum libbpf_map_type
1243bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
1244{
1245 if (shndx == obj->efile.data_shndx)
1246 return LIBBPF_MAP_DATA;
1247 else if (shndx == obj->efile.bss_shndx)
1248 return LIBBPF_MAP_BSS;
1249 else if (shndx == obj->efile.rodata_shndx)
1250 return LIBBPF_MAP_RODATA;
1251 else
1252 return LIBBPF_MAP_UNSPEC;
1253}
1254
Wang Nan34090912015-07-01 02:14:02 +00001255static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001256bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1257 Elf_Data *data, struct bpf_object *obj)
Wang Nan34090912015-07-01 02:14:02 +00001258{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001259 Elf_Data *symbols = obj->efile.symbols;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001260 struct bpf_map *maps = obj->maps;
1261 size_t nr_maps = obj->nr_maps;
Wang Nan34090912015-07-01 02:14:02 +00001262 int i, nrels;
1263
1264 pr_debug("collecting relocating info for: '%s'\n",
1265 prog->section_name);
1266 nrels = shdr->sh_size / shdr->sh_entsize;
1267
1268 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
1269 if (!prog->reloc_desc) {
1270 pr_warning("failed to alloc memory in relocation\n");
1271 return -ENOMEM;
1272 }
1273 prog->nr_reloc = nrels;
1274
1275 for (i = 0; i < nrels; i++) {
1276 GElf_Sym sym;
1277 GElf_Rel rel;
1278 unsigned int insn_idx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001279 unsigned int shdr_idx;
Wang Nan34090912015-07-01 02:14:02 +00001280 struct bpf_insn *insns = prog->insns;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001281 enum libbpf_map_type type;
1282 const char *name;
Wang Nan34090912015-07-01 02:14:02 +00001283 size_t map_idx;
1284
1285 if (!gelf_getrel(data, i, &rel)) {
1286 pr_warning("relocation: failed to get %d reloc\n", i);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001287 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00001288 }
1289
Wang Nan34090912015-07-01 02:14:02 +00001290 if (!gelf_getsym(symbols,
1291 GELF_R_SYM(rel.r_info),
1292 &sym)) {
1293 pr_warning("relocation: symbol %"PRIx64" not found\n",
1294 GELF_R_SYM(rel.r_info));
Wang Nan6371ca3b2015-11-06 13:49:37 +00001295 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00001296 }
1297
Daniel Borkmannd8599002019-04-09 23:20:13 +02001298 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1299 sym.st_name) ? : "<?>";
1300
1301 pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
1302 (long long) (rel.r_info >> 32),
1303 (long long) sym.st_value, sym.st_name, name);
1304
1305 shdr_idx = sym.st_shndx;
1306 if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
1307 pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
1308 prog->section_name, shdr_idx);
Wang Nan666810e2016-01-25 09:55:49 +00001309 return -LIBBPF_ERRNO__RELOC;
1310 }
1311
1312 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1313 pr_debug("relocation: insn_idx=%u\n", insn_idx);
1314
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001315 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1316 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1317 pr_warning("incorrect bpf_call opcode\n");
1318 return -LIBBPF_ERRNO__RELOC;
1319 }
1320 prog->reloc_desc[i].type = RELO_CALL;
1321 prog->reloc_desc[i].insn_idx = insn_idx;
1322 prog->reloc_desc[i].text_off = sym.st_value;
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001323 obj->has_pseudo_calls = true;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001324 continue;
1325 }
1326
Wang Nan34090912015-07-01 02:14:02 +00001327 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1328 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1329 insn_idx, insns[insn_idx].code);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001330 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001331 }
1332
Daniel Borkmannd8599002019-04-09 23:20:13 +02001333 if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
1334 bpf_object__shndx_is_data(obj, shdr_idx)) {
1335 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
1336 if (type != LIBBPF_MAP_UNSPEC &&
1337 GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
1338 pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
1339 name, insn_idx, insns[insn_idx].code);
1340 return -LIBBPF_ERRNO__RELOC;
1341 }
1342
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001343 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02001344 if (maps[map_idx].libbpf_type != type)
1345 continue;
1346 if (type != LIBBPF_MAP_UNSPEC ||
1347 (type == LIBBPF_MAP_UNSPEC &&
1348 maps[map_idx].offset == sym.st_value)) {
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001349 pr_debug("relocation: find map %zd (%s) for insn %u\n",
1350 map_idx, maps[map_idx].name, insn_idx);
1351 break;
1352 }
Joe Stringer94e5ade2017-01-22 17:11:22 -08001353 }
Joe Stringer94e5ade2017-01-22 17:11:22 -08001354
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001355 if (map_idx >= nr_maps) {
1356 pr_warning("bpf relocation: map_idx %d large than %d\n",
1357 (int)map_idx, (int)nr_maps - 1);
1358 return -LIBBPF_ERRNO__RELOC;
1359 }
Wang Nan34090912015-07-01 02:14:02 +00001360
Daniel Borkmannd8599002019-04-09 23:20:13 +02001361 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
1362 RELO_DATA : RELO_LD64;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001363 prog->reloc_desc[i].insn_idx = insn_idx;
1364 prog->reloc_desc[i].map_idx = map_idx;
1365 }
Wang Nan34090912015-07-01 02:14:02 +00001366 }
1367 return 0;
1368}
1369
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001370static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1371{
1372 struct bpf_map_def *def = &map->def;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001373 __u32 key_type_id = 0, value_type_id = 0;
Yonghong Song96408c42019-02-04 11:00:58 -08001374 int ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001375
Daniel Borkmannd8599002019-04-09 23:20:13 +02001376 if (!bpf_map__is_internal(map)) {
1377 ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
1378 def->value_size, &key_type_id,
1379 &value_type_id);
1380 } else {
1381 /*
1382 * LLVM annotates global data differently in BTF, that is,
1383 * only as '.data', '.bss' or '.rodata'.
1384 */
1385 ret = btf__find_by_name(btf,
1386 libbpf_type_to_btf_name[map->libbpf_type]);
1387 }
1388 if (ret < 0)
Yonghong Song96408c42019-02-04 11:00:58 -08001389 return ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001390
Yonghong Song96408c42019-02-04 11:00:58 -08001391 map->btf_key_type_id = key_type_id;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001392 map->btf_value_type_id = bpf_map__is_internal(map) ?
1393 ret : value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001394 return 0;
1395}
1396
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001397int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1398{
1399 struct bpf_map_info info = {};
1400 __u32 len = sizeof(info);
1401 int new_fd, err;
1402 char *new_name;
1403
1404 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1405 if (err)
1406 return err;
1407
1408 new_name = strdup(info.name);
1409 if (!new_name)
1410 return -errno;
1411
1412 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1413 if (new_fd < 0)
1414 goto err_free_new_name;
1415
1416 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1417 if (new_fd < 0)
1418 goto err_close_new_fd;
1419
1420 err = zclose(map->fd);
1421 if (err)
1422 goto err_close_new_fd;
1423 free(map->name);
1424
1425 map->fd = new_fd;
1426 map->name = new_name;
1427 map->def.type = info.type;
1428 map->def.key_size = info.key_size;
1429 map->def.value_size = info.value_size;
1430 map->def.max_entries = info.max_entries;
1431 map->def.map_flags = info.map_flags;
1432 map->btf_key_type_id = info.btf_key_type_id;
1433 map->btf_value_type_id = info.btf_value_type_id;
1434
1435 return 0;
1436
1437err_close_new_fd:
1438 close(new_fd);
1439err_free_new_name:
1440 free(new_name);
1441 return -errno;
1442}
1443
Andrey Ignatov1a11a4c2019-02-14 15:01:42 -08001444int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1445{
1446 if (!map || !max_entries)
1447 return -EINVAL;
1448
1449 /* If map already created, its attributes can't be changed. */
1450 if (map->fd >= 0)
1451 return -EBUSY;
1452
1453 map->def.max_entries = max_entries;
1454
1455 return 0;
1456}
1457
Wang Nan52d33522015-07-01 02:14:04 +00001458static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08001459bpf_object__probe_name(struct bpf_object *obj)
1460{
1461 struct bpf_load_program_attr attr;
1462 char *cp, errmsg[STRERR_BUFSIZE];
1463 struct bpf_insn insns[] = {
1464 BPF_MOV64_IMM(BPF_REG_0, 0),
1465 BPF_EXIT_INSN(),
1466 };
1467 int ret;
1468
1469 /* make sure basic loading works */
1470
1471 memset(&attr, 0, sizeof(attr));
1472 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1473 attr.insns = insns;
1474 attr.insns_cnt = ARRAY_SIZE(insns);
1475 attr.license = "GPL";
1476
1477 ret = bpf_load_program_xattr(&attr, NULL, 0);
1478 if (ret < 0) {
1479 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1480 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1481 __func__, cp, errno);
1482 return -errno;
1483 }
1484 close(ret);
1485
1486 /* now try the same program, but with the name */
1487
1488 attr.name = "test";
1489 ret = bpf_load_program_xattr(&attr, NULL, 0);
1490 if (ret >= 0) {
1491 obj->caps.name = 1;
1492 close(ret);
1493 }
1494
1495 return 0;
1496}
1497
/*
 * Probe kernel capabilities for this object. Currently only program/map
 * naming support is probed (result recorded in obj->caps by the callee).
 */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	return bpf_object__probe_name(obj);
}
1503
1504static int
Daniel Borkmannd8599002019-04-09 23:20:13 +02001505bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
1506{
1507 char *cp, errmsg[STRERR_BUFSIZE];
1508 int err, zero = 0;
1509 __u8 *data;
1510
1511 /* Nothing to do here since kernel already zero-initializes .bss map. */
1512 if (map->libbpf_type == LIBBPF_MAP_BSS)
1513 return 0;
1514
1515 data = map->libbpf_type == LIBBPF_MAP_DATA ?
1516 obj->sections.data : obj->sections.rodata;
1517
1518 err = bpf_map_update_elem(map->fd, &zero, data, 0);
1519 /* Freeze .rodata map as read-only from syscall side. */
1520 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
1521 err = bpf_map_freeze(map->fd);
1522 if (err) {
1523 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1524 pr_warning("Error freezing map(%s) as read-only: %s\n",
1525 map->name, cp);
1526 err = 0;
1527 }
1528 }
1529 return err;
1530}
1531
/*
 * Create a kernel map for every entry of obj->maps that does not yet
 * have an fd (fds may be preset via bpf_map__reuse_fd()). When BTF is
 * available, key/value type ids are attached; if the kernel rejects
 * that, creation is retried once without BTF. Internal maps
 * (.data/.rodata/.bss) are additionally populated from section data.
 * On any hard failure every fd created by earlier iterations is
 * closed and a negative error code is returned; 0 on success.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		/* fd >= 0 means the caller supplied a pre-created map. */
		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		/* Only pass a name if the kernel was probed to accept it. */
		if (obj->caps.name)
			create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		/* Reset BTF fields: create_attr is reused across iterations. */
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		if (bpf_map_type__is_map_in_map(def->type) &&
		    map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		/* Kernel may be too old for BTF ids: retry without them. */
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, cp, errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
err_out:
			/* Shared error path: also entered via the goto below
			 * (with err already set) after a populate failure.
			 */
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name, cp);
			/* Unwind fds created in all prior iterations. */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}

		if (bpf_map__is_internal(map)) {
			err = bpf_object__populate_internal_map(obj, map);
			if (err < 0) {
				zclose(*pfd);
				goto err_out;
			}
		}

		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
1611
Wang Nan8a47a6c2015-07-01 02:14:05 +00001612static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001613check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1614 void *btf_prog_info, const char *info_name)
1615{
1616 if (err != -ENOENT) {
1617 pr_warning("Error in loading %s for sec %s.\n",
1618 info_name, prog->section_name);
1619 return err;
1620 }
1621
1622 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1623
1624 if (btf_prog_info) {
1625 /*
1626 * Some info has already been found but has problem
1627 * in the last btf_ext reloc. Must have to error
1628 * out.
1629 */
1630 pr_warning("Error in relocating %s for sec %s.\n",
1631 info_name, prog->section_name);
1632 return err;
1633 }
1634
1635 /*
1636 * Have problem loading the very first info. Ignore
1637 * the rest.
1638 */
1639 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1640 info_name, prog->section_name, info_name);
1641 return 0;
1642}
1643
/*
 * Load or relocate .BTF.ext func_info and line_info for @prog.
 * insn_offset == 0 means @prog is the main program (initial load);
 * a non-zero insn_offset means a subprogram's records are being
 * relocated to that instruction offset, which requires the main
 * program's records to have been loaded first. Missing info is
 * tolerated only per the policy in check_btf_ext_reloc_err().
 * Returns 0 on success or a negative error code.
 */
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
			  const char *section_name, __u32 insn_offset)
{
	int err;

	if (!insn_offset || prog->func_info) {
		/*
		 * !insn_offset => main program
		 *
		 * For sub prog, the main program's func_info has to
		 * be loaded first (i.e. prog->func_info != NULL)
		 */
		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->func_info,
					       &prog->func_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->func_info,
						       "bpf_func_info");

		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
	}

	/* Same load-then-relocate protocol for line info. */
	if (!insn_offset || prog->line_info) {
		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
					       section_name, insn_offset,
					       &prog->line_info,
					       &prog->line_info_cnt);
		if (err)
			return check_btf_ext_reloc_err(prog, err,
						       prog->line_info,
						       "bpf_line_info");

		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
	}

	/* Main program additionally records the loaded BTF fd. */
	if (!insn_offset)
		prog->btf_fd = btf__fd(obj->btf);

	return 0;
}
1687
1688static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001689bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1690 struct reloc_desc *relo)
1691{
1692 struct bpf_insn *insn, *new_insn;
1693 struct bpf_program *text;
1694 size_t new_cnt;
Yonghong Song2993e052018-11-19 15:29:16 -08001695 int err;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001696
1697 if (relo->type != RELO_CALL)
1698 return -LIBBPF_ERRNO__RELOC;
1699
1700 if (prog->idx == obj->efile.text_shndx) {
1701 pr_warning("relo in .text insn %d into off %d\n",
1702 relo->insn_idx, relo->text_off);
1703 return -LIBBPF_ERRNO__RELOC;
1704 }
1705
1706 if (prog->main_prog_cnt == 0) {
1707 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1708 if (!text) {
1709 pr_warning("no .text section found yet relo into text exist\n");
1710 return -LIBBPF_ERRNO__RELOC;
1711 }
1712 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07001713 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001714 if (!new_insn) {
1715 pr_warning("oom in prog realloc\n");
1716 return -ENOMEM;
1717 }
Yonghong Song2993e052018-11-19 15:29:16 -08001718
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001719 if (obj->btf_ext) {
1720 err = bpf_program_reloc_btf_ext(prog, obj,
1721 text->section_name,
1722 prog->insns_cnt);
1723 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08001724 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08001725 }
1726
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001727 memcpy(new_insn + prog->insns_cnt, text->insns,
1728 text->insns_cnt * sizeof(*insn));
1729 prog->insns = new_insn;
1730 prog->main_prog_cnt = prog->insns_cnt;
1731 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00001732 pr_debug("added %zd insn from %s to prog %s\n",
1733 text->insns_cnt, text->section_name,
1734 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001735 }
1736 insn = &prog->insns[relo->insn_idx];
1737 insn->imm += prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001738 return 0;
1739}
1740
/*
 * Apply all collected relocations to @prog's instructions: map-fd (RELO_LD64),
 * map-value (RELO_DATA) and subprogram call (RELO_CALL) relocations, plus
 * BTF.ext func/line info relocation for the program itself.
 *
 * On success the relocation descriptors are freed — relocation is one-shot.
 * Returns 0 on success, negative error code otherwise.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	/* relocate this program's own BTF.ext info (insn_offset 0 == main) */
	if (obj->btf_ext) {
		err = bpf_program_reloc_btf_ext(prog, obj,
						prog->section_name, 0);
		if (err)
			return err;
	}

	/* no descriptors collected — nothing to patch */
	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64 ||
		    prog->reloc_desc[i].type == RELO_DATA) {
			bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			/* ld_imm64 spans two insns, hence insn_idx + 1 */
			if (insn_idx + 1 >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}

			if (!relo_data) {
				/* plain map reference: imm = map fd */
				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			} else {
				/* map-value reference: second half of the
				 * ld_imm64 carries the offset within the value
				 */
				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
				insns[insn_idx + 1].imm = insns[insn_idx].imm;
			}
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else if (prog->reloc_desc[i].type == RELO_CALL) {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	/* descriptors are consumed; relocation must not run twice */
	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
1794
1795
1796static int
1797bpf_object__relocate(struct bpf_object *obj)
1798{
1799 struct bpf_program *prog;
1800 size_t i;
1801 int err;
1802
1803 for (i = 0; i < obj->nr_programs; i++) {
1804 prog = &obj->programs[i];
1805
Wang Nan9d759a92015-11-27 08:47:35 +00001806 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001807 if (err) {
1808 pr_warning("failed to relocate '%s'\n",
1809 prog->section_name);
1810 return err;
1811 }
1812 }
1813 return 0;
1814}
1815
/*
 * Walk every relocation section recorded during ELF parsing and hand each
 * one to the program it applies to (matched via the section's sh_info).
 * This only *collects* relocation descriptors; actual patching happens in
 * bpf_object__relocate().  Returns 0 on success, negative error otherwise.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	/* the ELF handle must still be open at this stage */
	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		/* sh_info of a SHT_REL section is the target section index */
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
1850
/*
 * Load one set of instructions into the kernel via BPF_PROG_LOAD.
 *
 * On success, stores the new program fd in *@pfd and returns 0.  On failure
 * returns a negative libbpf error code, attempting to classify the failure:
 *   -LIBBPF_ERRNO__VERIFY    verifier rejected the program (log dumped)
 *   -LIBBPF_ERRNO__PROG2BIG  program exceeds BPF_MAXINSNS
 *   -LIBBPF_ERRNO__PROGTYPE  program loads fine as a kprobe => wrong type
 *   -LIBBPF_ERRNO__KVER      likely kernel-version mismatch
 *   -LIBBPF_ERRNO__LOAD      anything else
 *
 * If the kernel signals the verifier log buffer was too small (ENOSPC), the
 * buffer is doubled and the load retried.
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	int log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* only pass a program name if the kernel supports it */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	/* btf_fd < 0 means "no BTF"; the kernel expects 0 in that case */
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	load_attr.log_level = prog->log_level;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

retry_load:
	log_buf = malloc(log_buf_size);
	/* a failed allocation is tolerated: load proceeds without a log */
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);

	if (ret >= 0) {
		if (load_attr.log_level)
			pr_debug("verifier log:\n%s", log_buf);
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* verifier log did not fit — double the buffer and try again.
	 * NOTE(review): if malloc() above failed, whether the kernel can
	 * return ENOSPC without a buffer is not visible here — confirm the
	 * retry cannot loop with log_buf == NULL.
	 */
	if (errno == ENOSPC) {
		log_buf_size <<= 1;
		free(log_buf);
		goto retry_load;
	}
	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* the verifier wrote something — it rejected the program */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? Probe by retrying as a kprobe. */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1938
/*
 * Load @prog into the kernel, producing one fd per instance.
 *
 * Without a preprocessor there is exactly one instance: the program's own
 * instructions are loaded as-is.  With a preprocessor, each instance i gets
 * instructions generated by preprocessor(prog, i, ...); an instance that
 * produces no instructions is skipped (fd stays -1).
 *
 * Note: on return — success or failure — prog->insns is freed; the program
 * cannot be loaded a second time from the same bpf_program.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* lazily set up the single-instance fd array */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		/* a preprocessor must have configured instances already */
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* non-preprocessed programs always have exactly 1 instance */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* preprocessed path: one kernel load per instance */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* no instructions produced => instance deliberately skipped */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* report the fd both through result.pfd and the fds array */
		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are not needed (nor valid to reuse) after loading */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
2017
Jakub Kicinski9a94f272018-06-28 14:41:38 -07002018static bool bpf_program__is_function_storage(struct bpf_program *prog,
2019 struct bpf_object *obj)
2020{
2021 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
2022}
2023
Wang Nan55cffde2015-07-01 02:14:07 +00002024static int
2025bpf_object__load_progs(struct bpf_object *obj)
2026{
2027 size_t i;
2028 int err;
2029
2030 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07002031 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002032 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00002033 err = bpf_program__load(&obj->programs[i],
2034 obj->license,
2035 obj->kern_version);
2036 if (err)
2037 return err;
2038 }
2039 return 0;
2040}
2041
/*
 * Whether programs of @type must carry a kernel version at load time.
 * Only kprobes (and unknown/future types, conservatively) require it;
 * every type explicitly listed below does not.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_KPROBE:
		return true;
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	default:
		/* be conservative about types we do not know */
		return true;
	}
}
2073
2074static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
2075{
2076 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00002077 pr_warning("%s doesn't provide kernel version\n",
2078 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00002079 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00002080 }
2081 return 0;
2082}
2083
/*
 * Core open path shared by all public open variants: create a bpf_object
 * from @path or from an in-memory ELF buffer (@obj_buf/@obj_buf_sz), parse
 * its sections, collect relocations and validate it.  The ELF handle is
 * released before returning; kernel loading happens later in
 * bpf_object__load().
 *
 * Returns the object on success, an ERR_PTR-encoded negative error on
 * failure (never NULL).
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR jumps to 'out' with 'err' set on the first failure */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* the ELF data is fully digested; drop it */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
2112
John Fastabendc034a172018-10-15 11:19:55 -07002113struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
2114 int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002115{
2116 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002117 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002118 return NULL;
2119
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002120 pr_debug("loading %s\n", attr->file);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002121
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07002122 return __bpf_object__open(attr->file, NULL, 0,
John Fastabendc034a172018-10-15 11:19:55 -07002123 bpf_prog_type__needs_kver(attr->prog_type),
2124 flags);
2125}
2126
/* Public open-by-attr entry point: same as the internal variant with
 * default (zero) parsing flags.
 */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
2131
2132struct bpf_object *bpf_object__open(const char *path)
2133{
2134 struct bpf_object_open_attr attr = {
2135 .file = path,
2136 .prog_type = BPF_PROG_TYPE_UNSPEC,
2137 };
2138
2139 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00002140}
2141
/*
 * Open a BPF object directly from an in-memory ELF image.  When @name is
 * NULL, a synthetic "<addr>-<size>" name is generated so the object is
 * still identifiable in logs.
 *
 * NOTE(review): the last argument of __bpf_object__open() is the int
 * 'flags' parameter, yet 'true' (== 1) is passed here — it reads like it
 * was meant for a bool parameter.  Confirm whether flags should be 0, as
 * the file-based open paths default to.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
2164
Wang Nan52d33522015-07-01 02:14:04 +00002165int bpf_object__unload(struct bpf_object *obj)
2166{
2167 size_t i;
2168
2169 if (!obj)
2170 return -EINVAL;
2171
Wang Nan9d759a92015-11-27 08:47:35 +00002172 for (i = 0; i < obj->nr_maps; i++)
2173 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00002174
Wang Nan55cffde2015-07-01 02:14:07 +00002175 for (i = 0; i < obj->nr_programs; i++)
2176 bpf_program__unload(&obj->programs[i]);
2177
Wang Nan52d33522015-07-01 02:14:04 +00002178 return 0;
2179}
2180
/*
 * Load an opened object into the kernel: probe kernel capabilities, create
 * the maps, apply relocations, then load the programs.  A given object may
 * only be loaded once; on any failure everything loaded so far is torn
 * down via bpf_object__unload().
 *
 * Returns 0 on success, negative error code otherwise.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* mark loaded up front so a failed load cannot be retried */
	obj->loaded = true;

	/* CHECK_ERR jumps to 'out' with 'err' set on the first failure */
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
2206
Joe Stringerf3675402017-01-26 13:19:56 -08002207static int check_path(const char *path)
2208{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002209 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08002210 struct statfs st_fs;
2211 char *dname, *dir;
2212 int err = 0;
2213
2214 if (path == NULL)
2215 return -EINVAL;
2216
2217 dname = strdup(path);
2218 if (dname == NULL)
2219 return -ENOMEM;
2220
2221 dir = dirname(dname);
2222 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07002223 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002224 pr_warning("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08002225 err = -errno;
2226 }
2227 free(dname);
2228
2229 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
2230 pr_warning("specified path %s is not on BPF FS\n", path);
2231 err = -EINVAL;
2232 }
2233
2234 return err;
2235}
2236
2237int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
2238 int instance)
2239{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002240 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08002241 int err;
2242
2243 err = check_path(path);
2244 if (err)
2245 return err;
2246
2247 if (prog == NULL) {
2248 pr_warning("invalid program pointer\n");
2249 return -EINVAL;
2250 }
2251
2252 if (instance < 0 || instance >= prog->instances.nr) {
2253 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
2254 instance, prog->section_name, prog->instances.nr);
2255 return -EINVAL;
2256 }
2257
2258 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07002259 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002260 pr_warning("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08002261 return -errno;
2262 }
2263 pr_debug("pinned program '%s'\n", path);
2264
2265 return 0;
2266}
2267
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002268int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
2269 int instance)
2270{
2271 int err;
2272
2273 err = check_path(path);
2274 if (err)
2275 return err;
2276
2277 if (prog == NULL) {
2278 pr_warning("invalid program pointer\n");
2279 return -EINVAL;
2280 }
2281
2282 if (instance < 0 || instance >= prog->instances.nr) {
2283 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
2284 instance, prog->section_name, prog->instances.nr);
2285 return -EINVAL;
2286 }
2287
2288 err = unlink(path);
2289 if (err != 0)
2290 return -errno;
2291 pr_debug("unpinned program '%s'\n", path);
2292
2293 return 0;
2294}
2295
Joe Stringerf3675402017-01-26 13:19:56 -08002296static int make_dir(const char *path)
2297{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002298 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08002299 int err = 0;
2300
2301 if (mkdir(path, 0700) && errno != EEXIST)
2302 err = -errno;
2303
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002304 if (err) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07002305 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002306 pr_warning("failed to mkdir %s: %s\n", path, cp);
2307 }
Joe Stringerf3675402017-01-26 13:19:56 -08002308 return err;
2309}
2310
/*
 * Pin all instances of @prog.  A single-instance program is pinned
 * directly at @path; a multi-instance program gets a directory at @path
 * with one file per instance ("<path>/<i>").  On any failure, every
 * instance pinned so far is unpinned and the directory removed.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back: unpin every instance pinned before the failure */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
2377
2378int bpf_program__unpin(struct bpf_program *prog, const char *path)
2379{
2380 int i, err;
2381
2382 err = check_path(path);
2383 if (err)
2384 return err;
2385
2386 if (prog == NULL) {
2387 pr_warning("invalid program pointer\n");
2388 return -EINVAL;
2389 }
2390
2391 if (prog->instances.nr <= 0) {
2392 pr_warning("no instances of prog %s to pin\n",
2393 prog->section_name);
2394 return -EINVAL;
2395 }
2396
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08002397 if (prog->instances.nr == 1) {
2398 /* don't create subdirs when pinning single instance */
2399 return bpf_program__unpin_instance(prog, path, 0);
2400 }
2401
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002402 for (i = 0; i < prog->instances.nr; i++) {
2403 char buf[PATH_MAX];
2404 int len;
2405
2406 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08002407 if (len < 0)
2408 return -EINVAL;
2409 else if (len >= PATH_MAX)
2410 return -ENAMETOOLONG;
2411
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002412 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08002413 if (err)
2414 return err;
2415 }
2416
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002417 err = rmdir(path);
2418 if (err)
2419 return -errno;
2420
Joe Stringerf3675402017-01-26 13:19:56 -08002421 return 0;
2422}
2423
Joe Stringerb6989f32017-01-26 13:19:57 -08002424int bpf_map__pin(struct bpf_map *map, const char *path)
2425{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002426 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08002427 int err;
2428
2429 err = check_path(path);
2430 if (err)
2431 return err;
2432
2433 if (map == NULL) {
2434 pr_warning("invalid map pointer\n");
2435 return -EINVAL;
2436 }
2437
2438 if (bpf_obj_pin(map->fd, path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07002439 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002440 pr_warning("failed to pin map: %s\n", cp);
Joe Stringerb6989f32017-01-26 13:19:57 -08002441 return -errno;
2442 }
2443
2444 pr_debug("pinned map '%s'\n", path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002445
Joe Stringerb6989f32017-01-26 13:19:57 -08002446 return 0;
2447}
2448
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002449int bpf_map__unpin(struct bpf_map *map, const char *path)
Joe Stringerd5148d82017-01-26 13:19:58 -08002450{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002451 int err;
2452
2453 err = check_path(path);
2454 if (err)
2455 return err;
2456
2457 if (map == NULL) {
2458 pr_warning("invalid map pointer\n");
2459 return -EINVAL;
2460 }
2461
2462 err = unlink(path);
2463 if (err != 0)
2464 return -errno;
2465 pr_debug("unpinned map '%s'\n", path);
2466
2467 return 0;
2468}
2469
/*
 * Pin every map of @obj under directory @path, one file per map named
 * after the map.  The object must already be loaded (fds exist).  On any
 * failure, maps pinned so far are unpinned by walking backwards with
 * bpf_map__prev() from the failing map.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* roll back: unpin everything before the map that failed */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2525
2526int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2527{
2528 struct bpf_map *map;
2529 int err;
2530
2531 if (!obj)
2532 return -ENOENT;
2533
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08002534 bpf_object__for_each_map(map, obj) {
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002535 char buf[PATH_MAX];
2536 int len;
2537
2538 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2539 bpf_map__name(map));
Joe Stringerd5148d82017-01-26 13:19:58 -08002540 if (len < 0)
2541 return -EINVAL;
2542 else if (len >= PATH_MAX)
2543 return -ENAMETOOLONG;
2544
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002545 err = bpf_map__unpin(map, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002546 if (err)
2547 return err;
2548 }
2549
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002550 return 0;
2551}
2552
/* Pin every program of a loaded object under directory @path as
 * "<path>/<pin_name>". On failure, programs pinned so far are rolled
 * back. Returns 0 or a negative errno-style code.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	/* Program fds only exist after the object has been loaded. */
	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			/* snprintf truncated the pin path */
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* @prog points at the failing program; undo the earlier ones. */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2608
2609int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2610{
2611 struct bpf_program *prog;
2612 int err;
2613
2614 if (!obj)
2615 return -ENOENT;
2616
Joe Stringerd5148d82017-01-26 13:19:58 -08002617 bpf_object__for_each_program(prog, obj) {
2618 char buf[PATH_MAX];
2619 int len;
2620
2621 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08002622 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08002623 if (len < 0)
2624 return -EINVAL;
2625 else if (len >= PATH_MAX)
2626 return -ENAMETOOLONG;
2627
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002628 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08002629 if (err)
2630 return err;
2631 }
2632
2633 return 0;
2634}
2635
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002636int bpf_object__pin(struct bpf_object *obj, const char *path)
2637{
2638 int err;
2639
2640 err = bpf_object__pin_maps(obj, path);
2641 if (err)
2642 return err;
2643
2644 err = bpf_object__pin_programs(obj, path);
2645 if (err) {
2646 bpf_object__unpin_maps(obj, path);
2647 return err;
2648 }
2649
2650 return 0;
2651}
2652
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002653void bpf_object__close(struct bpf_object *obj)
2654{
Wang Nana5b8bd42015-07-01 02:14:00 +00002655 size_t i;
2656
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002657 if (!obj)
2658 return;
2659
Wang Nan10931d22016-11-26 07:03:26 +00002660 if (obj->clear_priv)
2661 obj->clear_priv(obj, obj->priv);
2662
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002663 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00002664 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002665 btf__free(obj->btf);
Yonghong Song2993e052018-11-19 15:29:16 -08002666 btf_ext__free(obj->btf_ext);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002667
Wang Nan9d759a92015-11-27 08:47:35 +00002668 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00002669 zfree(&obj->maps[i].name);
Wang Nan9d759a92015-11-27 08:47:35 +00002670 if (obj->maps[i].clear_priv)
2671 obj->maps[i].clear_priv(&obj->maps[i],
2672 obj->maps[i].priv);
2673 obj->maps[i].priv = NULL;
2674 obj->maps[i].clear_priv = NULL;
2675 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02002676
2677 zfree(&obj->sections.rodata);
2678 zfree(&obj->sections.data);
Wang Nan9d759a92015-11-27 08:47:35 +00002679 zfree(&obj->maps);
2680 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00002681
2682 if (obj->programs && obj->nr_programs) {
2683 for (i = 0; i < obj->nr_programs; i++)
2684 bpf_program__exit(&obj->programs[i]);
2685 }
2686 zfree(&obj->programs);
2687
Wang Nan9a208ef2015-07-01 02:14:10 +00002688 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00002689 free(obj);
2690}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002691
Wang Nan9a208ef2015-07-01 02:14:10 +00002692struct bpf_object *
2693bpf_object__next(struct bpf_object *prev)
2694{
2695 struct bpf_object *next;
2696
2697 if (!prev)
2698 next = list_first_entry(&bpf_objects_list,
2699 struct bpf_object,
2700 list);
2701 else
2702 next = list_next_entry(prev, list);
2703
2704 /* Empty list is noticed here so don't need checking on entry. */
2705 if (&next->list == &bpf_objects_list)
2706 return NULL;
2707
2708 return next;
2709}
2710
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002711const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00002712{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002713 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00002714}
2715
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002716unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00002717{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002718 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00002719}
2720
Andrey Ignatov789f6ba2019-02-14 15:01:43 -08002721struct btf *bpf_object__btf(struct bpf_object *obj)
2722{
2723 return obj ? obj->btf : NULL;
2724}
2725
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002726int bpf_object__btf_fd(const struct bpf_object *obj)
2727{
2728 return obj->btf ? btf__fd(obj->btf) : -1;
2729}
2730
Wang Nan10931d22016-11-26 07:03:26 +00002731int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2732 bpf_object_clear_priv_t clear_priv)
2733{
2734 if (obj->priv && obj->clear_priv)
2735 obj->clear_priv(obj, obj->priv);
2736
2737 obj->priv = priv;
2738 obj->clear_priv = clear_priv;
2739 return 0;
2740}
2741
2742void *bpf_object__priv(struct bpf_object *obj)
2743{
2744 return obj ? obj->priv : ERR_PTR(-EINVAL);
2745}
2746
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002747static struct bpf_program *
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002748__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002749{
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002750 size_t nr_programs = obj->nr_programs;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002751 ssize_t idx;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002752
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002753 if (!nr_programs)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002754 return NULL;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002755
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002756 if (!p)
2757 /* Iter from the beginning */
2758 return forward ? &obj->programs[0] :
2759 &obj->programs[nr_programs - 1];
2760
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002761 if (p->obj != obj) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002762 pr_warning("error: program handler doesn't match object\n");
2763 return NULL;
2764 }
2765
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002766 idx = (p - obj->programs) + (forward ? 1 : -1);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002767 if (idx >= obj->nr_programs || idx < 0)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002768 return NULL;
2769 return &obj->programs[idx];
2770}
2771
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002772struct bpf_program *
2773bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2774{
2775 struct bpf_program *prog = prev;
2776
2777 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002778 prog = __bpf_program__iter(prog, obj, true);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002779 } while (prog && bpf_program__is_function_storage(prog, obj));
2780
2781 return prog;
2782}
2783
2784struct bpf_program *
2785bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2786{
2787 struct bpf_program *prog = next;
2788
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08002789 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08002790 prog = __bpf_program__iter(prog, obj, false);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07002791 } while (prog && bpf_program__is_function_storage(prog, obj));
2792
2793 return prog;
2794}
2795
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002796int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2797 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002798{
2799 if (prog->priv && prog->clear_priv)
2800 prog->clear_priv(prog, prog->priv);
2801
2802 prog->priv = priv;
2803 prog->clear_priv = clear_priv;
2804 return 0;
2805}
2806
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002807void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002808{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03002809 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002810}
2811
Jakub Kicinski9aba3612018-06-28 14:41:37 -07002812void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
2813{
2814 prog->prog_ifindex = ifindex;
2815}
2816
Namhyung Kim715f8db2015-11-03 20:21:05 +09002817const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002818{
2819 const char *title;
2820
2821 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09002822 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002823 title = strdup(title);
2824 if (!title) {
2825 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00002826 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002827 }
2828 }
2829
2830 return title;
2831}
2832
/* Return the fd of the program's first (0th) instance, or a negative
 * error if the program is NULL or not loaded.
 */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2837
2838int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2839 bpf_program_prep_t prep)
2840{
2841 int *instances_fds;
2842
2843 if (nr_instances <= 0 || !prep)
2844 return -EINVAL;
2845
2846 if (prog->instances.nr > 0 || prog->instances.fds) {
2847 pr_warning("Can't set pre-processor after loading\n");
2848 return -EINVAL;
2849 }
2850
2851 instances_fds = malloc(sizeof(int) * nr_instances);
2852 if (!instances_fds) {
2853 pr_warning("alloc memory failed for fds\n");
2854 return -ENOMEM;
2855 }
2856
2857 /* fill all fd with -1 */
2858 memset(instances_fds, -1, sizeof(int) * nr_instances);
2859
2860 prog->instances.nr = nr_instances;
2861 prog->instances.fds = instances_fds;
2862 prog->preprocessor = prep;
2863 return 0;
2864}
2865
2866int bpf_program__nth_fd(struct bpf_program *prog, int n)
2867{
2868 int fd;
2869
Jakub Kicinski1e960042018-07-26 14:32:18 -07002870 if (!prog)
2871 return -EINVAL;
2872
Wang Nanb5805632015-11-16 12:10:09 +00002873 if (n >= prog->instances.nr || n < 0) {
2874 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2875 n, prog->section_name, prog->instances.nr);
2876 return -EINVAL;
2877 }
2878
2879 fd = prog->instances.fds[n];
2880 if (fd < 0) {
2881 pr_warning("%dth instance of program '%s' is invalid\n",
2882 n, prog->section_name);
2883 return -ENOENT;
2884 }
2885
2886 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00002887}
Wang Nan9d759a92015-11-27 08:47:35 +00002888
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07002889void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00002890{
2891 prog->type = type;
2892}
2893
Wang Nan5f44e4c82016-07-13 10:44:01 +00002894static bool bpf_program__is_type(struct bpf_program *prog,
2895 enum bpf_prog_type type)
2896{
2897 return prog ? (prog->type == type) : false;
2898}
2899
Joe Stringered794072017-01-22 17:11:23 -08002900#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
2901int bpf_program__set_##NAME(struct bpf_program *prog) \
2902{ \
2903 if (!prog) \
2904 return -EINVAL; \
2905 bpf_program__set_type(prog, TYPE); \
2906 return 0; \
2907} \
2908 \
2909bool bpf_program__is_##NAME(struct bpf_program *prog) \
2910{ \
2911 return bpf_program__is_type(prog, TYPE); \
2912} \
Wang Nan5f44e4c82016-07-13 10:44:01 +00002913
Joe Stringer7803ba72017-01-22 17:11:24 -08002914BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
Joe Stringered794072017-01-22 17:11:23 -08002915BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
Joe Stringer7803ba72017-01-22 17:11:24 -08002916BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
2917BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
Joe Stringered794072017-01-22 17:11:23 -08002918BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
Andrey Ignatove14c93fd2018-04-17 10:28:46 -07002919BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
Joe Stringer7803ba72017-01-22 17:11:24 -08002920BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
2921BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002922
John Fastabend16962b22018-04-23 14:30:38 -07002923void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2924 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002925{
2926 prog->expected_attach_type = type;
2927}
2928
Andrey Ignatov36153532018-10-31 12:57:18 -07002929#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
2930 { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002931
Andrey Ignatov956b6202018-09-26 15:24:53 -07002932/* Programs that can NOT be attached. */
Andrey Ignatov36153532018-10-31 12:57:18 -07002933#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002934
Andrey Ignatov956b6202018-09-26 15:24:53 -07002935/* Programs that can be attached. */
2936#define BPF_APROG_SEC(string, ptype, atype) \
Andrey Ignatov36153532018-10-31 12:57:18 -07002937 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
Andrey Ignatov81efee72018-04-17 10:28:45 -07002938
Andrey Ignatov956b6202018-09-26 15:24:53 -07002939/* Programs that must specify expected attach type at load time. */
2940#define BPF_EAPROG_SEC(string, ptype, eatype) \
Andrey Ignatov36153532018-10-31 12:57:18 -07002941 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
Andrey Ignatov956b6202018-09-26 15:24:53 -07002942
2943/* Programs that can be attached but attach type can't be identified by section
2944 * name. Kept for backward compatibility.
2945 */
2946#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07002947
Roman Gushchin583c9002017-12-13 15:18:51 +00002948static const struct {
2949 const char *sec;
2950 size_t len;
2951 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002952 enum bpf_attach_type expected_attach_type;
Andrey Ignatov36153532018-10-31 12:57:18 -07002953 int is_attachable;
Andrey Ignatov956b6202018-09-26 15:24:53 -07002954 enum bpf_attach_type attach_type;
Roman Gushchin583c9002017-12-13 15:18:51 +00002955} section_names[] = {
Andrey Ignatov956b6202018-09-26 15:24:53 -07002956 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
2957 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
2958 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
2959 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
2960 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
2961 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
2962 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
2963 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
2964 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
2965 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
2966 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
2967 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
2968 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
Andrey Ignatovbafa7af2018-09-26 15:24:54 -07002969 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
2970 BPF_CGROUP_INET_INGRESS),
2971 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
2972 BPF_CGROUP_INET_EGRESS),
Andrey Ignatov956b6202018-09-26 15:24:53 -07002973 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
2974 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
2975 BPF_CGROUP_INET_SOCK_CREATE),
2976 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
2977 BPF_CGROUP_INET4_POST_BIND),
2978 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
2979 BPF_CGROUP_INET6_POST_BIND),
2980 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
2981 BPF_CGROUP_DEVICE),
2982 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
2983 BPF_CGROUP_SOCK_OPS),
Andrey Ignatovc6f68512018-09-26 15:24:55 -07002984 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
2985 BPF_SK_SKB_STREAM_PARSER),
2986 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
2987 BPF_SK_SKB_STREAM_VERDICT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07002988 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
2989 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
2990 BPF_SK_MSG_VERDICT),
2991 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
2992 BPF_LIRC_MODE2),
2993 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
2994 BPF_FLOW_DISSECTOR),
2995 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2996 BPF_CGROUP_INET4_BIND),
2997 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2998 BPF_CGROUP_INET6_BIND),
2999 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3000 BPF_CGROUP_INET4_CONNECT),
3001 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3002 BPF_CGROUP_INET6_CONNECT),
3003 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3004 BPF_CGROUP_UDP4_SENDMSG),
3005 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3006 BPF_CGROUP_UDP6_SENDMSG),
Roman Gushchin583c9002017-12-13 15:18:51 +00003007};
Roman Gushchin583c9002017-12-13 15:18:51 +00003008
Andrey Ignatov956b6202018-09-26 15:24:53 -07003009#undef BPF_PROG_SEC_IMPL
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003010#undef BPF_PROG_SEC
Andrey Ignatov956b6202018-09-26 15:24:53 -07003011#undef BPF_APROG_SEC
3012#undef BPF_EAPROG_SEC
3013#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003014
Taeung Songc76e4c22019-01-21 22:06:38 +09003015#define MAX_TYPE_NAME_SIZE 32
3016
3017static char *libbpf_get_type_names(bool attach_type)
3018{
3019 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
3020 char *buf;
3021
3022 buf = malloc(len);
3023 if (!buf)
3024 return NULL;
3025
3026 buf[0] = '\0';
3027 /* Forge string buf with all available names */
3028 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
3029 if (attach_type && !section_names[i].is_attachable)
3030 continue;
3031
3032 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
3033 free(buf);
3034 return NULL;
3035 }
3036 strcat(buf, " ");
3037 strcat(buf, section_names[i].sec);
3038 }
3039
3040 return buf;
3041}
3042
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07003043int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
3044 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00003045{
Taeung Songc76e4c22019-01-21 22:06:38 +09003046 char *type_names;
Roman Gushchin583c9002017-12-13 15:18:51 +00003047 int i;
3048
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07003049 if (!name)
3050 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00003051
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07003052 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
3053 if (strncmp(name, section_names[i].sec, section_names[i].len))
3054 continue;
3055 *prog_type = section_names[i].prog_type;
3056 *expected_attach_type = section_names[i].expected_attach_type;
3057 return 0;
3058 }
Taeung Songc76e4c22019-01-21 22:06:38 +09003059 pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
3060 type_names = libbpf_get_type_names(false);
3061 if (type_names != NULL) {
3062 pr_info("supported section(type) names are:%s\n", type_names);
3063 free(type_names);
3064 }
3065
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07003066 return -EINVAL;
3067}
Roman Gushchin583c9002017-12-13 15:18:51 +00003068
Andrey Ignatov956b6202018-09-26 15:24:53 -07003069int libbpf_attach_type_by_name(const char *name,
3070 enum bpf_attach_type *attach_type)
3071{
Taeung Songc76e4c22019-01-21 22:06:38 +09003072 char *type_names;
Andrey Ignatov956b6202018-09-26 15:24:53 -07003073 int i;
3074
3075 if (!name)
3076 return -EINVAL;
3077
3078 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
3079 if (strncmp(name, section_names[i].sec, section_names[i].len))
3080 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07003081 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07003082 return -EINVAL;
3083 *attach_type = section_names[i].attach_type;
3084 return 0;
3085 }
Taeung Songc76e4c22019-01-21 22:06:38 +09003086 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
3087 type_names = libbpf_get_type_names(true);
3088 if (type_names != NULL) {
3089 pr_info("attachable section(type) names are:%s\n", type_names);
3090 free(type_names);
3091 }
3092
Andrey Ignatov956b6202018-09-26 15:24:53 -07003093 return -EINVAL;
3094}
3095
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07003096static int
3097bpf_program__identify_section(struct bpf_program *prog,
3098 enum bpf_prog_type *prog_type,
3099 enum bpf_attach_type *expected_attach_type)
3100{
3101 return libbpf_prog_type_by_name(prog->section_name, prog_type,
3102 expected_attach_type);
Roman Gushchin583c9002017-12-13 15:18:51 +00003103}
3104
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03003105int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00003106{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03003107 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00003108}
3109
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03003110const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00003111{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03003112 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00003113}
3114
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03003115const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00003116{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03003117 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00003118}
3119
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07003120__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003121{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07003122 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003123}
3124
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07003125__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003126{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07003127 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07003128}
3129
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03003130int bpf_map__set_priv(struct bpf_map *map, void *priv,
3131 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00003132{
3133 if (!map)
3134 return -EINVAL;
3135
3136 if (map->priv) {
3137 if (map->clear_priv)
3138 map->clear_priv(map, map->priv);
3139 }
3140
3141 map->priv = priv;
3142 map->clear_priv = clear_priv;
3143 return 0;
3144}
3145
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03003146void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00003147{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03003148 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00003149}
3150
Jakub Kicinskif83fb222018-07-10 14:43:01 -07003151bool bpf_map__is_offload_neutral(struct bpf_map *map)
3152{
3153 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
3154}
3155
Daniel Borkmannd8599002019-04-09 23:20:13 +02003156bool bpf_map__is_internal(struct bpf_map *map)
3157{
3158 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
3159}
3160
Jakub Kicinski9aba3612018-06-28 14:41:37 -07003161void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
3162{
3163 map->map_ifindex = ifindex;
3164}
3165
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08003166int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
3167{
3168 if (!bpf_map_type__is_map_in_map(map->def.type)) {
3169 pr_warning("error: unsupported map type\n");
3170 return -EINVAL;
3171 }
3172 if (map->inner_map_fd != -1) {
3173 pr_warning("error: inner_map_fd already specified\n");
3174 return -EINVAL;
3175 }
3176 map->inner_map_fd = fd;
3177 return 0;
3178}
3179
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003180static struct bpf_map *
3181__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00003182{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003183 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00003184 struct bpf_map *s, *e;
3185
3186 if (!obj || !obj->maps)
3187 return NULL;
3188
3189 s = obj->maps;
3190 e = obj->maps + obj->nr_maps;
3191
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003192 if ((m < s) || (m >= e)) {
Wang Nan9d759a92015-11-27 08:47:35 +00003193 pr_warning("error in %s: map handler doesn't belong to object\n",
3194 __func__);
3195 return NULL;
3196 }
3197
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003198 idx = (m - obj->maps) + i;
3199 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00003200 return NULL;
3201 return &obj->maps[idx];
3202}
Wang Nan561bbcc2015-11-27 08:47:36 +00003203
3204struct bpf_map *
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08003205bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
3206{
3207 if (prev == NULL)
3208 return obj->maps;
3209
3210 return __bpf_map__iter(prev, obj, 1);
3211}
3212
3213struct bpf_map *
3214bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
3215{
3216 if (next == NULL) {
3217 if (!obj->nr_maps)
3218 return NULL;
3219 return obj->maps + obj->nr_maps - 1;
3220 }
3221
3222 return __bpf_map__iter(next, obj, -1);
3223}
3224
3225struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03003226bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00003227{
3228 struct bpf_map *pos;
3229
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08003230 bpf_object__for_each_map(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00003231 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00003232 return pos;
3233 }
3234 return NULL;
3235}
Wang Nan5a6acad2016-11-26 07:03:27 +00003236
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01003237int
3238bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
3239{
3240 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
3241}
3242
Wang Nan5a6acad2016-11-26 07:03:27 +00003243struct bpf_map *
3244bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
3245{
3246 int i;
3247
3248 for (i = 0; i < obj->nr_maps; i++) {
3249 if (obj->maps[i].offset == offset)
3250 return &obj->maps[i];
3251 }
3252 return ERR_PTR(-ENOENT);
3253}
Joe Stringere28ff1a2017-01-22 17:11:25 -08003254
/* Extract the negative error code from an ERR_PTR-style pointer, or 0
 * if @ptr is a valid (non-error) pointer.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07003261
3262int bpf_prog_load(const char *file, enum bpf_prog_type type,
3263 struct bpf_object **pobj, int *prog_fd)
3264{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003265 struct bpf_prog_load_attr attr;
3266
3267 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
3268 attr.file = file;
3269 attr.prog_type = type;
3270 attr.expected_attach_type = 0;
3271
3272 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
3273}
3274
3275int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
3276 struct bpf_object **pobj, int *prog_fd)
3277{
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003278 struct bpf_object_open_attr open_attr = {
3279 .file = attr->file,
3280 .prog_type = attr->prog_type,
3281 };
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003282 struct bpf_program *prog, *first_prog = NULL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003283 enum bpf_attach_type expected_attach_type;
3284 enum bpf_prog_type prog_type;
John Fastabend6f6d33f2017-08-15 22:34:22 -07003285 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07003286 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07003287 int err;
3288
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003289 if (!attr)
3290 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07003291 if (!attr->file)
3292 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003293
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003294 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07003295 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07003296 return -ENOENT;
3297
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003298 bpf_object__for_each_program(prog, obj) {
3299 /*
3300 * If type is not specified, try to guess it based on
3301 * section name.
3302 */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003303 prog_type = attr->prog_type;
David Beckettf0307a72018-05-16 14:02:49 -07003304 prog->prog_ifindex = attr->ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003305 expected_attach_type = attr->expected_attach_type;
3306 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07003307 err = bpf_program__identify_section(prog, &prog_type,
3308 &expected_attach_type);
3309 if (err < 0) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003310 bpf_object__close(obj);
3311 return -EINVAL;
3312 }
3313 }
3314
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003315 bpf_program__set_type(prog, prog_type);
3316 bpf_program__set_expected_attach_type(prog,
3317 expected_attach_type);
3318
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003319 prog->log_level = attr->log_level;
Taeung Song69495d22018-09-03 08:30:07 +09003320 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003321 first_prog = prog;
3322 }
3323
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08003324 bpf_object__for_each_map(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07003325 if (!bpf_map__is_offload_neutral(map))
3326 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07003327 }
3328
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003329 if (!first_prog) {
3330 pr_warning("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07003331 bpf_object__close(obj);
3332 return -ENOENT;
3333 }
3334
John Fastabend6f6d33f2017-08-15 22:34:22 -07003335 err = bpf_object__load(obj);
3336 if (err) {
3337 bpf_object__close(obj);
3338 return -EINVAL;
3339 }
3340
3341 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003342 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07003343 return 0;
3344}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07003345
/* Drain a perf ring buffer mmap'ed at @mmap_mem (control page followed
 * by @mmap_size bytes of data), invoking @fn on every event record.
 * Records that wrap around the end of the ring are linearized into the
 * caller-managed scratch buffer *copy_mem (grown on demand; caller
 * frees it). Consumed data is acknowledged by advancing the tail
 * pointer with release semantics. Returns the last callback result, or
 * LIBBPF_PERF_EVENT_ERROR on allocation failure.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	/* acquire-load of the kernel-written producer position */
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		/* mmap_size is a power of two; masking wraps the offset */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			/* Record wraps past the end of the ring: stitch
			 * the two halves together in *copy_mem.
			 */
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* release-store: tell the kernel how far we consumed */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
Song Liu34be16462019-03-11 22:30:38 -07003393
/*
 * Describes one variable-length array embedded in struct bpf_prog_info
 * purely by the byte offsets of its members, so that all such arrays can
 * be processed generically (see bpf_prog_info_array_desc[] below).
 */
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};
3401
/*
 * One descriptor per variable-length array in struct bpf_prog_info,
 * indexed by the BPF_PROG_INFO_* enum. Entries with a negative third
 * member have a fixed per-record size of -size_offset bytes (e.g. -1 for
 * byte arrays of instructions); positive values are the offset of the
 * kernel-reported record size field.
 */
static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};
3450
/*
 * Read the __u32 member of *info located at byte offset @offset. A
 * negative @offset encodes a fixed record size (see
 * bpf_prog_info_array_desc) and is returned negated instead of being
 * dereferenced.
 */
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
	const __u32 *words = (__u32 *)info;

	if (offset < 0)
		return -(int)offset;

	return words[offset / sizeof(__u32)];
}
3459
/*
 * Read the __u64 member of *info located at byte offset @offset. As with
 * the __u32 variant, a negative @offset is a fixed-record-size encoding
 * and is returned negated without touching *info.
 */
static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
	const __u64 *words = (__u64 *)info;

	if (offset < 0)
		return -(int)offset;

	return words[offset / sizeof(__u64)];
}
3468
/*
 * Store @val into the __u32 member of *info at byte offset @offset.
 * Negative offsets encode fixed record sizes, not struct members, so the
 * store is silently skipped for them.
 */
static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *words = (__u32 *)info;

	if (offset < 0)
		return;

	words[offset / sizeof(__u32)] = val;
}
3477
/*
 * Store @val into the __u64 member of *info at byte offset @offset.
 * Mirrors bpf_prog_info_set_offset_u32(): negative offsets are size
 * encodings, so nothing is written for them.
 */
static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *words = (__u64 *)info;

	if (offset < 0)
		return;

	words[offset / sizeof(__u64)] = val;
}
3486
/*
 * Fetch bpf_prog_info for program @fd plus the variable-length arrays
 * selected by the @arrays bitmask (bits are BPF_PROG_INFO_* enum values),
 * all packed into one malloc'ed, self-contained blob.
 *
 * The returned info_linear->arrays may have bits cleared relative to the
 * request if the running kernel is too old to report that array. Caller
 * owns the returned memory (free() it); errors are returned as ERR_PTR()
 * encoded negative values, never NULL.
 */
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	/* reject bits above the highest array this libbpf knows about */
	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		/* negative size_offset decodes to the fixed record size */
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	/* carve info_linear->data into per-array slices and point the
	 * corresponding bpf_prog_info members at them, so the kernel fills
	 * our single allocation directly in step 5 */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data
	 *
	 * NOTE(review): counts can legitimately change between the two
	 * syscalls (e.g. maps added to the program in the meantime), which
	 * presumably is why a mismatch only warns instead of failing.
	 */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warning("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
3599
3600void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
3601{
3602 int i;
3603
3604 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3605 struct bpf_prog_info_array_desc *desc;
3606 __u64 addr, offs;
3607
3608 if ((info_linear->arrays & (1UL << i)) == 0)
3609 continue;
3610
3611 desc = bpf_prog_info_array_desc + i;
3612 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
3613 desc->array_offset);
3614 offs = addr - ptr_to_u64(info_linear->data);
3615 bpf_prog_info_set_offset_u64(&info_linear->info,
3616 desc->array_offset, offs);
3617 }
3618}
3619
3620void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
3621{
3622 int i;
3623
3624 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3625 struct bpf_prog_info_array_desc *desc;
3626 __u64 addr, offs;
3627
3628 if ((info_linear->arrays & (1UL << i)) == 0)
3629 continue;
3630
3631 desc = bpf_prog_info_array_desc + i;
3632 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
3633 desc->array_offset);
3634 addr = offs + ptr_to_u64(info_linear->data);
3635 bpf_prog_info_set_offset_u64(&info_linear->info,
3636 desc->array_offset, addr);
3637 }
3638}