// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vfprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

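/* Usage sketch (illustrative only, not part of this file): route libbpf
 * logs through a custom callback, then restore the previous one. Setting
 * the callback to NULL silences all output.
 *
 *	static int my_print(enum libbpf_print_level lvl, const char *fmt,
 *			    va_list args)
 *	{
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(old_fn);
 */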
libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

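/* Example of the zfree()/zclose() idiom above (a sketch, not library API):
 * both reset their argument, so a repeated call becomes a harmless no-op.
 *
 *	char *s = strdup("x");
 *	zfree(&s);		- frees s and sets it to NULL
 *	zfree(&s);		- now just free(NULL)
 *
 *	int fd = open("f", O_RDONLY);
 *	err = zclose(fd);	- closes fd (if >= 0), then sets fd to -1
 */
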
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
	/* BPF_F_MMAPABLE is supported for arrays */
	__u32 array_mmap:1;
};

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
};

/*
 * bpf_prog would be a better name, but that name is already taken by
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define EXTERN_SEC ".extern"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_EXTERN,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= DATA_SEC,
	[LIBBPF_MAP_BSS]	= BSS_SEC,
	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
	[LIBBPF_MAP_EXTERN]	= EXTERN_SEC,
};

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_CHAR,
	EXT_BOOL,
	EXT_INT,
	EXT_TRISTATE,
	EXT_CHAR_ARR,
};

struct extern_desc {
	const char *name;
	int sym_idx;
	int btf_id;
	enum extern_type type;
	int sz;
	int align;
	int data_off;
	bool is_signed;
	bool is_weak;
	bool is_set;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig_path;
	struct extern_desc *externs;
	int nr_extern;
	int extern_map_idx;

	bool loaded;
	bool has_pseudo_calls;
	bool relaxed_core_relocs;

	/*
	 * Information used while processing the ELF file. Only valid
	 * while fd is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_objects are linked into a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

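/* Example (illustrative): a program in SEC("cgroup/skb") gets pin_name
 * "cgroup_skb", so recursive pinning can create one regular bpffs file
 * per program.
 */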
static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warn("corrupted section '%s', size: %zu\n",
			section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warn("failed to alloc name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warn("failed to alloc insns for prog under section %s\n",
			section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs is still
		 * valid, so there is no need for special treatment in
		 * bpf_close_object().
		 */
		pr_warn("failed to alloc a new program under section '%s'\n",
			section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warn("failed to get sym name string for prog %s\n",
					prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warn("failed to find sym for prog %s\n",
				prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warn("failed to allocate memory for prog sym %s\n",
				name);
			return -ENOMEM;
		}
	}

	return 0;
}

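/* E.g. (illustrative): with uname -r reporting "5.4.0-42-generic", this
 * parses major=5, minor=4, patch=0 and returns KERNEL_VERSION(5, 4, 0),
 * i.e. (5 << 16) + (4 << 8) + 0 == 0x050400.
 */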
static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. Otherwise we would have to duplicate
	 * the buffer to avoid the user freeing it before ELF
	 * processing finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->extern_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
	pr_warn("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

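/* Growth policy below (illustrative): capacity goes 4, 6, 9, 13, ...,
 * i.e. max(4, cap * 3 / 2), amortizing realloc() cost as maps are added.
 */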
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so we won't close an incorrect fd
	 * (fd=0 is stdin) on failure (zclose won't close a negative fd).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

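/* Worked example (illustrative): value_size = 7, max_entries = 100 on a
 * system with 4096-byte pages: roundup(7, 8) * 100 = 800 bytes of data,
 * rounded up to one full page (4096 bytes) for mmap()'ing.
 */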
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

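/* Internal maps get names like "<obj>.rodata", truncated via the
 * "%.8s%.7s" format below to fit BPF_OBJ_NAME_LEN: e.g. an object named
 * "test_progs" yields the map name "test_pro.rodata" (illustrative).
 */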
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_EXTERN
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map_name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
			     char value)
{
	switch (ext->type) {
	case EXT_BOOL:
		if (value == 'm') {
			pr_warn("extern %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case EXT_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case EXT_CHAR:
		*(char *)ext_val = value;
		break;
	case EXT_UNKNOWN:
	case EXT_INT:
	case EXT_CHAR_ARR:
	default:
		pr_warn("extern %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_ext_value_str(struct extern_desc *ext, char *ext_val,
			     const char *value)
{
	size_t len;

	if (ext->type != EXT_CHAR_ARR) {
		pr_warn("extern %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->sz) {
		pr_warn("extern '%s': string config '%s' is too long (%zu bytes), truncated to %d bytes\n",
			ext->name, value, len, ext->sz - 1);
		len = ext->sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

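/* Worked example for the signed check below (illustrative): for a signed
 * 1-byte extern (bit_sz = 8), v = -128 is stored as 0xffffffffffffff80;
 * adding 2^7 wraps to 0, which is < 2^8, so the value fits. v = 128 gives
 * 128 + 128 = 256, which is not < 2^8, so it is rejected.
 */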
static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->sz * 8;

	if (ext->sz == 8)
		return true;

	/* Validate that value stored in u64 fits in integer of `ext->sz`
	 * bytes size without any loss of information. If the target integer
	 * is signed, we rely on the following limits of integer type of
	 * Y bits and subsequent transformation:
	 *
	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
	 *      0 <= X + 2^(Y-1)       <= 2^Y - 1
	 *      0 <= X + 2^(Y-1)       <  2^Y
	 *
	 * For unsigned target integer, check that all the (64 - Y) bits are
	 * zero.
	 */
	if (ext->is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}

static int set_ext_value_num(struct extern_desc *ext, void *ext_val,
			     __u64 value)
{
	if (ext->type != EXT_INT && ext->type != EXT_CHAR) {
		pr_warn("extern %s=%llu should be integer\n",
			ext->name, value);
		return -EINVAL;
	}
	if (!is_ext_value_in_range(ext, value)) {
		pr_warn("extern %s=%llu value doesn't fit in %d bytes\n",
			ext->name, value, ext->sz);
		return -ERANGE;
	}
	switch (ext->sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

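/* The parser below handles Kconfig lines like these (illustrative):
 *
 *	CONFIG_BPF=y                       -> bool/tristate extern
 *	CONFIG_DEFAULT_HOSTNAME="(none)"   -> char array extern
 *	CONFIG_HZ=250                      -> integer extern
 *
 * Lines not starting with "CONFIG_" (comments, blanks) are skipped.
 */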
static int bpf_object__read_kernel_config(struct bpf_object *obj,
					  const char *config_path,
					  void *data)
{
	char buf[PATH_MAX], *sep, *value;
	struct extern_desc *ext;
	int len, err = 0;
	void *ext_val;
	__u64 num;
	gzFile file;

	if (config_path) {
		file = gzopen(config_path, "r");
	} else {
		struct utsname uts;

		uname(&uts);
		len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;
		/* gzopen also accepts uncompressed files. */
		file = gzopen(buf, "r");
		if (!file)
			file = gzopen("/proc/config.gz", "r");
	}
	if (!file) {
		pr_warn("failed to read kernel config at '%s'\n", config_path);
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		if (strncmp(buf, "CONFIG_", 7))
			continue;

		sep = strchr(buf, '=');
		if (!sep) {
			err = -EINVAL;
			pr_warn("failed to parse '%s': no separator\n", buf);
			goto out;
		}
		/* Trim ending '\n' */
		len = strlen(buf);
		if (buf[len - 1] == '\n')
			buf[len - 1] = '\0';
		/* Split on '=' and ensure that a value is present. */
		*sep = '\0';
		if (!sep[1]) {
			err = -EINVAL;
			*sep = '=';
			pr_warn("failed to parse '%s': no value\n", buf);
			goto out;
		}

		ext = find_extern_by_name(obj, buf);
		if (!ext)
			continue;
		if (ext->is_set) {
			err = -EINVAL;
			pr_warn("re-defining extern '%s' not allowed\n", buf);
			goto out;
		}

		ext_val = data + ext->data_off;
		value = sep + 1;

		switch (*value) {
		case 'y': case 'n': case 'm':
			err = set_ext_value_tri(ext, ext_val, *value);
			break;
		case '"':
			err = set_ext_value_str(ext, ext_val, value);
			break;
		default:
			/* assume integer */
			err = parse_u64(value, &num);
			if (err) {
				pr_warn("extern %s=%s should be integer\n",
					ext->name, value);
				goto out;
			}
			err = set_ext_value_num(ext, ext_val, num);
			break;
		}
		if (err)
			goto out;
		pr_debug("extern %s=%s\n", ext->name, value);
	}

out:
	gzclose(file);
	return err;
}

static int bpf_object__init_extern_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext;
	size_t map_sz;
	int err;

	if (obj->nr_extern == 0)
		return 0;

	last_ext = &obj->externs[obj->nr_extern - 1];
	map_sz = last_ext->data_off + last_ext->sz;

	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_EXTERN,
					    obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->extern_map_idx = obj->nr_maps - 1;

	return 0;
}

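/* A "legacy" map is declared in the ELF "maps" section as a plain struct
 * variable (a sketch; the SEC() macro comes from helper headers such as
 * bpf_helpers.h, not from this file):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type        = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(int),
 *		.value_size  = sizeof(long),
 *		.max_entries = 1024,
 *	};
 */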
static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d\n",
			obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("unable to determine map definition size in section %s, %d maps in %zd bytes\n",
			obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero, since the map was
		 * zero-initialized when added in bpf_object__add_map().
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than
			 * what we expect, truncate if the excess bytes are
			 * all zero. If they are not zero, reject this map
			 * as incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

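/* E.g. (illustrative): for `typedef const int cint_t;` this resolves the
 * chain cint_t -> const -> int, returning the underlying int type (and
 * its ID via res_id); typedefs and type modifiers are transparent here.
 */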
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
			map_name, name, btf_kind(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
			map_name, name, btf_kind(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}

static int build_map_pin_path(struct bpf_map *map, const char *path)
{
	char buf[PATH_MAX];
	int err, len;

	if (!path)
		path = "/sys/fs/bpf";

	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	err = bpf_map__set_pin_path(map, buf);
	if (err)
		return err;

	return 0;
}

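/* A BTF-defined map in the ".maps" section looks roughly like this
 * (a sketch; the __uint/__type encoding macros live in helper headers
 * such as bpf_helpers.h, not in this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 16);
 *		__type(key, int);
 *		__type(value, long);
 *	} my_map SEC(".maps");
 */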
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict,
					 const char *pin_root_path)
{
	const struct btf_type *var, *def, *t;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const struct btf_member *m;
	const char *map_name;
	struct bpf_map *map;
	int vlen, i;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	map_name = btf__name_by_offset(obj->btf, var->name_off);
	vlen = btf_vlen(var);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warn("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warn("map '%s': unexpected var kind %u.\n",
			map_name, btf_kind(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warn("map '%s': unsupported var linkage %u.\n",
			map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warn("map '%s': unexpected def kind %u.\n",
			map_name, btf_kind(def));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warn("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map_name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map_name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map_name, map->def.map_flags);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map_name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map_name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %u.\n",
					map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %zd.\n",
				 map_name, t->type, (ssize_t)sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %zd.\n",
					map_name, map->def.key_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map_name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map_name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %u.\n",
					map_name, btf_kind(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %zd.\n",
				 map_name, t->type, (ssize_t)sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %zd.\n",
					map_name, map->def.value_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;
			int err;

			if (!get_map_field_int(map_name, obj->btf, def, m,
					       &val))
				return -EINVAL;
			pr_debug("map '%s': found pinning = %u.\n",
				 map_name, val);

			if (val != LIBBPF_PIN_NONE &&
			    val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map_name, val);
				return -EINVAL;
			}
			if (val == LIBBPF_PIN_BY_NAME) {
				err = build_map_pin_path(map, pin_root_path);
				if (err) {
					pr_warn("map '%s': couldn't build pin path.\n",
						map_name);
					return err;
				}
			}
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n",
					map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map_name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}

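/*
 * Added illustration (hypothetical BPF-side source): opting into pinning is
 * just one more integer attribute in the map definition, e.g.
 *
 *   int (*pinning)[LIBBPF_PIN_BY_NAME];
 *
 * which makes the code above derive the pin path from pin_root_path plus
 * the map name via build_map_pin_path().
 */
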
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
					  const char *pin_root_path)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d (%s)\n",
			obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			break;
		}
	}

	if (!sec) {
		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict,
						    pin_root_path);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_maps(struct bpf_object *obj,
				 const struct bpf_object_open_opts *opts)
{
	const char *pin_root_path;
	bool strict;
	int err;

	strict = !OPTS_GET(opts, relaxed_maps, false);
	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);

	err = bpf_object__init_user_maps(obj, strict);
	err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
	err = err ?: bpf_object__init_global_data_maps(obj);
	err = err ?: bpf_object__init_extern_map(obj);
	if (err)
		return err;

	if (obj->nr_maps) {
		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
		      compare_bpf_map);
	}
	return 0;
}

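/*
 * Added caller-side sketch (hypothetical application code): the two options
 * consumed above originate from bpf_object_open_opts, e.g.
 *
 *   DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *           .relaxed_maps = true,
 *           .pin_root_path = "/sys/fs/bpf/myapp",
 *   );
 *   struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
 */
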
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;

	if (!obj->btf || (has_func && has_datasec))
		return;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if (!has_datasec && btf_is_var(t)) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		}
	}
}

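/*
 * Added illustration of the sanitization above, assuming a kernel without
 * BTF_KIND_VAR/BTF_KIND_DATASEC support: each VAR is rewritten in place
 * into a 1-byte unsigned INT, and a DATASEC like '.data' becomes a STRUCT
 * named '_data' with one member per contained variable, offsets converted
 * from bytes to bits.
 */
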
static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext)
		return;

	if (!obj->caps.btf_func) {
		btf_ext__free(obj->btf_ext);
		obj->btf_ext = NULL;
	}
}

static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0;
}

static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	bool btf_required = bpf_object__is_btf_mandatory(obj);
	int err = 0;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			pr_warn("Error loading ELF section %s: %ld.\n",
				BTF_ELF_SEC, PTR_ERR(obj->btf));
			goto out;
		}
	}
	if (btf_ext_data) {
		if (!obj->btf) {
			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
				BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err || IS_ERR(obj->btf)) {
		if (btf_required)
			err = err ? : PTR_ERR(obj->btf);
		else
			err = 0;
		if (!IS_ERR_OR_NULL(obj->btf))
			btf__free(obj->btf);
		obj->btf = NULL;
	}
	if (btf_required && !obj->btf) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return err == 0 ? -ENOENT : err;
	}
	return 0;
}

static int bpf_object__finalize_btf(struct bpf_object *obj)
{
	int err;

	if (!obj->btf)
		return 0;

	err = btf__finalize_data(obj, obj->btf);
	if (!err)
		return 0;

	pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
	btf__free(obj->btf);
	obj->btf = NULL;
	btf_ext__free(obj->btf_ext);
	obj->btf_ext = NULL;

	if (bpf_object__is_btf_mandatory(obj)) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return -ENOENT;
	}
	return 0;
}

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	int err = 0;

	if (!obj->btf)
		return 0;

	bpf_object__sanitize_btf(obj);
	bpf_object__sanitize_btf_ext(obj);

	err = btf__load(obj->btf);
	if (err) {
		pr_warn("Error loading %s into kernel: %d.\n",
			BTF_ELF_SEC, err);
		btf__free(obj->btf);
		obj->btf = NULL;
		/* btf_ext can't exist without btf, so free it as well */
		if (obj->btf_ext) {
			btf_ext__free(obj->btf_ext);
			obj->btf_ext = NULL;
		}

		if (bpf_object__is_btf_mandatory(obj))
			return err;
	}
	return 0;
}

static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warn("bpf: multiple SYMTAB in %s\n",
					obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}
			obj->efile.symbols = data;
			obj->efile.symbols_shndx = idx;
			obj->efile.strtabidx = sh.sh_link;
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_program(obj, data->d_buf,
							      data->d_size,
							      name, idx);
				if (err) {
					char errmsg[STRERR_BUFSIZE];
					char *cp;

					cp = libbpf_strerror_r(-err, errmsg,
							       sizeof(errmsg));
					pr_warn("failed to alloc program %s (%s): %s\n",
						name, obj->path, cp);
					return err;
				}
			} else if (strcmp(name, DATA_SEC) == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, RODATA_SEC) == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else {
				pr_debug("skip section(%d) %s\n", idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_sects = obj->efile.nr_reloc_sects;
			void *sects = obj->efile.reloc_sects;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			sects = reallocarray(sects, nr_sects + 1,
					     sizeof(*obj->efile.reloc_sects));
			if (!sects) {
				pr_warn("reloc_sects realloc failed\n");
				return -ENOMEM;
			}

			obj->efile.reloc_sects = sects;
			obj->efile.nr_reloc_sects++;

			obj->efile.reloc_sects[nr_sects].shdr = sh;
			obj->efile.reloc_sects[nr_sects].data = data;
		} else if (sh.sh_type == SHT_NOBITS &&
			   strcmp(name, BSS_SEC) == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
		pr_warn("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}

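/*
 * Added summary (not in the original): bpf_object__elf_collect() routes
 * sections roughly as follows:
 *
 *   "license" / "version"      -> object metadata
 *   "maps" / ".maps"           -> legacy / BTF-defined map definitions
 *   ".BTF" / ".BTF.ext"        -> type info and line/func info
 *   SHT_SYMTAB                 -> symbol table (externs, relocations)
 *   executable SHT_PROGBITS    -> BPF program sections
 *   .data / .rodata / .bss     -> global data maps
 *   SHT_REL                    -> relocations for executable sections
 */
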
static bool sym_is_extern(const GElf_Sym *sym)
{
	int bind = GELF_ST_BIND(sym->st_info);
	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
	return sym->st_shndx == SHN_UNDEF &&
	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
	       GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
}

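/*
 * Added example: a BPF-side declaration such as
 *
 *   extern int CONFIG_BPF_SYSCALL;
 *
 * compiles to a global (or weak) NOTYPE symbol with st_shndx == SHN_UNDEF,
 * which is exactly the shape sym_is_extern() tests for.
 */
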
static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
{
	const struct btf_type *t;
	const char *var_name;
	int i, n;

	if (!btf)
		return -ESRCH;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(btf, i);

		if (!btf_is_var(t))
			continue;

		var_name = btf__name_by_offset(btf, t->name_off);
		if (strcmp(var_name, ext_name))
			continue;

		if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
			return -EINVAL;

		return i;
	}

	return -ENOENT;
}

static enum extern_type find_extern_type(const struct btf *btf, int id,
					 bool *is_signed)
{
	const struct btf_type *t;
	const char *name;

	t = skip_mods_and_typedefs(btf, id, NULL);
	name = btf__name_by_offset(btf, t->name_off);

	if (is_signed)
		*is_signed = false;
	switch (btf_kind(t)) {
	case BTF_KIND_INT: {
		int enc = btf_int_encoding(t);

		if (enc & BTF_INT_BOOL)
			return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN;
		if (is_signed)
			*is_signed = enc & BTF_INT_SIGNED;
		if (t->size == 1)
			return EXT_CHAR;
		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
			return EXT_UNKNOWN;
		return EXT_INT;
	}
	case BTF_KIND_ENUM:
		if (t->size != 4)
			return EXT_UNKNOWN;
		if (strcmp(name, "libbpf_tristate"))
			return EXT_UNKNOWN;
		return EXT_TRISTATE;
	case BTF_KIND_ARRAY:
		if (btf_array(t)->nelems == 0)
			return EXT_UNKNOWN;
		if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR)
			return EXT_UNKNOWN;
		return EXT_CHAR_ARR;
	default:
		return EXT_UNKNOWN;
	}
}

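/*
 * Added reference table derived from the switch above:
 *
 *   bool b;                  -> EXT_BOOL
 *   char c;                  -> EXT_CHAR
 *   unsigned long ul;        -> EXT_INT (sizes 2/4/8, signedness tracked)
 *   enum libbpf_tristate t;  -> EXT_TRISTATE
 *   char s[32];              -> EXT_CHAR_ARR
 *
 * anything else is EXT_UNKNOWN and rejected by the caller.
 */
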
static int cmp_externs(const void *_a, const void *_b)
{
	const struct extern_desc *a = _a;
	const struct extern_desc *b = _b;

	/* descending order by alignment requirements */
	if (a->align != b->align)
		return a->align > b->align ? -1 : 1;
	/* ascending order by size, within same alignment class */
	if (a->sz != b->sz)
		return a->sz < b->sz ? -1 : 1;
	/* resolve ties by name */
	return strcmp(a->name, b->name);
}

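/*
 * Added worked example of the resulting layout: externs with
 * (align, size) of (8, 8), (4, 4), (1, 1), (1, 1) sort in that order and
 * get offsets 0, 8, 12 and 13 from the roundup() loop below -- descending
 * alignment keeps padding minimal.
 */
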
static int bpf_object__collect_externs(struct bpf_object *obj)
{
	const struct btf_type *t;
	struct extern_desc *ext;
	int i, n, off, btf_id;
	struct btf_type *sec;
	const char *ext_name;
	Elf_Scn *scn;
	GElf_Shdr sh;

	if (!obj->efile.symbols)
		return 0;

	scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
	if (!scn)
		return -LIBBPF_ERRNO__FORMAT;
	if (gelf_getshdr(scn, &sh) != &sh)
		return -LIBBPF_ERRNO__FORMAT;
	n = sh.sh_size / sh.sh_entsize;

	pr_debug("looking for externs among %d symbols...\n", n);
	for (i = 0; i < n; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(obj->efile.symbols, i, &sym))
			return -LIBBPF_ERRNO__FORMAT;
		if (!sym_is_extern(&sym))
			continue;
		ext_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!ext_name || !ext_name[0])
			continue;

		ext = obj->externs;
		ext = reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
		if (!ext)
			return -ENOMEM;
		obj->externs = ext;
		ext = &ext[obj->nr_extern];
		memset(ext, 0, sizeof(*ext));
		obj->nr_extern++;

		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
		if (ext->btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s': %d\n",
				ext_name, ext->btf_id);
			return ext->btf_id;
		}
		t = btf__type_by_id(obj->btf, ext->btf_id);
		ext->name = btf__name_by_offset(obj->btf, t->name_off);
		ext->sym_idx = i;
		ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
		ext->sz = btf__resolve_size(obj->btf, t->type);
		if (ext->sz <= 0) {
			pr_warn("failed to resolve size of extern '%s': %d\n",
				ext_name, ext->sz);
			return ext->sz;
		}
		ext->align = btf__align_of(obj->btf, t->type);
		if (ext->align <= 0) {
			pr_warn("failed to determine alignment of extern '%s': %d\n",
				ext_name, ext->align);
			return -EINVAL;
		}
		ext->type = find_extern_type(obj->btf, t->type,
					     &ext->is_signed);
		if (ext->type == EXT_UNKNOWN) {
			pr_warn("extern '%s' type is unsupported\n", ext_name);
			return -ENOTSUP;
		}
	}
	pr_debug("collected %d externs total\n", obj->nr_extern);

	if (!obj->nr_extern)
		return 0;

	/* sort externs by (alignment, size, name) and calculate their offsets
	 * within a map */
	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
	off = 0;
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		ext->data_off = roundup(off, ext->align);
		off = ext->data_off + ext->sz;
		pr_debug("extern #%d: symbol %d, off %u, name %s\n",
			 i, ext->sym_idx, ext->data_off, ext->name);
	}

	btf_id = btf__find_by_name(obj->btf, EXTERN_SEC);
	if (btf_id <= 0) {
		pr_warn("no BTF info found for '%s' datasec\n", EXTERN_SEC);
		return -ESRCH;
	}

	sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id);
	sec->size = off;
	n = btf_vlen(sec);
	for (i = 0; i < n; i++) {
		struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;

		t = btf__type_by_id(obj->btf, vs->type);
		ext_name = btf__name_by_offset(obj->btf, t->name_off);
		ext = find_extern_by_name(obj, ext_name);
		if (!ext) {
			pr_warn("failed to find extern definition for BTF var '%s'\n",
				ext_name);
			return -ESRCH;
		}
		vs->offset = ext->data_off;
		btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
	}

	return 0;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}

struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		if (!strcmp(prog->name, name))
			return prog;
	}
	return NULL;
}

static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.data_shndx ||
	       shndx == obj->efile.bss_shndx ||
	       shndx == obj->efile.rodata_shndx;
}

static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.maps_shndx ||
	       shndx == obj->efile.btf_maps_shndx;
}

static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else if (shndx == obj->efile.symbols_shndx)
		return LIBBPF_MAP_EXTERN;
	else
		return LIBBPF_MAP_UNSPEC;
}

static int bpf_program__record_reloc(struct bpf_program *prog,
				     struct reloc_desc *reloc_desc,
				     __u32 insn_idx, const char *name,
				     const GElf_Sym *sym, const GElf_Rel *rel)
{
	struct bpf_insn *insn = &prog->insns[insn_idx];
	size_t map_idx, nr_maps = prog->obj->nr_maps;
	struct bpf_object *obj = prog->obj;
	__u32 shdr_idx = sym->st_shndx;
	enum libbpf_map_type type;
	struct bpf_map *map;

	/* sub-program call relocation */
	if (insn->code == (BPF_JMP | BPF_CALL)) {
		if (insn->src_reg != BPF_PSEUDO_CALL) {
			pr_warn("incorrect bpf_call opcode\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		/* text_shndx can be 0, if no default "main" program exists */
		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
			pr_warn("bad call relo against section %u\n", shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (sym->st_value % 8) {
			pr_warn("bad call relo offset: %zu\n",
				(size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_CALL;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = sym->st_value;
		obj->has_pseudo_calls = true;
		return 0;
	}

	if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
		pr_warn("invalid relo for insns[%d].code 0x%x\n",
			insn_idx, insn->code);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (sym_is_extern(sym)) {
		int sym_idx = GELF_R_SYM(rel->r_info);
		int i, n = obj->nr_extern;
		struct extern_desc *ext;

		for (i = 0; i < n; i++) {
			ext = &obj->externs[i];
			if (ext->sym_idx == sym_idx)
				break;
		}
		if (i >= n) {
			pr_warn("extern relo failed to find extern for sym %d\n",
				sym_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n",
			 i, ext->name, ext->sym_idx, ext->data_off, insn_idx);
		reloc_desc->type = RELO_EXTERN;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = ext->data_off;
		return 0;
	}

	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
		pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
			name, shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);

	/* generic map reference relocation */
	if (type == LIBBPF_MAP_UNSPEC) {
		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
			pr_warn("bad map relo against section %u\n",
				shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			map = &obj->maps[map_idx];
			if (map->libbpf_type != type ||
			    map->sec_idx != sym->st_shndx ||
			    map->sec_offset != sym->st_value)
				continue;
			pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
				 map_idx, map->name, map->sec_idx,
				 map->sec_offset, insn_idx);
			break;
		}
		if (map_idx >= nr_maps) {
			pr_warn("map relo failed to find map for sec %u, off %zu\n",
				shdr_idx, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_LD64;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->map_idx = map_idx;
		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
		return 0;
	}

	/* global data map relocation */
	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
		pr_warn("bad data relo against section %u\n", shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}
	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
		map = &obj->maps[map_idx];
		if (map->libbpf_type != type)
			continue;
		pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
			 map_idx, map->name, map->sec_idx, map->sec_offset,
			 insn_idx);
		break;
	}
	if (map_idx >= nr_maps) {
		pr_warn("data relo failed to find map for sec %u\n",
			shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	reloc_desc->type = RELO_DATA;
	reloc_desc->insn_idx = insn_idx;
	reloc_desc->map_idx = map_idx;
	reloc_desc->sym_off = sym->st_value;
	return 0;
}

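/*
 * Added summary of the reloc_desc variants filled in above:
 *
 *   RELO_CALL   - bpf-to-bpf call; sym_off is the callee offset in .text
 *   RELO_LD64   - ld_imm64 against a map definition; map_idx is resolved
 *                 from the symbol's section/offset, sym_off stays 0
 *   RELO_DATA   - ld_imm64 into .data/.rodata/.bss; map_idx plus sym_off
 *                 into the section's backing map
 *   RELO_EXTERN - ld_imm64 against an extern; sym_off is the extern's
 *                 offset inside the externs map
 */
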
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int err, i, nrels;

	pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warn("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		const char *name;
		__u32 insn_idx;
		GElf_Sym sym;
		GElf_Rel rel;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warn("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn("relocation: symbol %"PRIx64" not found\n",
				GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (rel.r_offset % sizeof(struct bpf_insn))
			return -LIBBPF_ERRNO__FORMAT;

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				  sym.st_name) ? : "<?>";

		pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
			 (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info),
			 (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info),
			 GELF_ST_BIND(sym.st_info), sym.st_name, name,
			 insn_idx);

		err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
						insn_idx, name, &sym, &rel);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_map_def *def = &map->def;
	__u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* if it's BTF-defined map, we don't need to search for type IDs */
	if (map->sec_idx == obj->efile.btf_maps_shndx)
		return 0;

	if (!bpf_map__is_internal(map)) {
		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
					   def->value_size, &key_type_id,
					   &value_type_id);
	} else {
		/*
		 * LLVM annotates global data differently in BTF, that is,
		 * only as '.data', '.bss' or '.rodata'.
		 */
		ret = btf__find_by_name(obj->btf,
					libbpf_type_to_btf_name[map->libbpf_type]);
	}
	if (ret < 0)
		return ret;

	map->btf_key_type_id = key_type_id;
	map->btf_value_type_id = bpf_map__is_internal(map) ?
				 ret : value_type_id;
	return 0;
}

int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;
	map->reused = true;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return err;
}

int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
{
	if (!map || !max_entries)
		return -EINVAL;

	/* If map already created, its attributes can't be changed. */
	if (map->fd >= 0)
		return -EBUSY;

	map->def.max_entries = max_entries;

	return 0;
}

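/*
 * Added usage sketch (hypothetical caller): resize between open and load,
 * e.g. to scale an array map by the number of possible CPUs:
 *
 *   bpf_map__resize(bpf_object__find_map_by_name(obj, "counts"),
 *                   libbpf_num_possible_cpus());
 */
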
static int
bpf_object__probe_name(struct bpf_object *obj)
{
	struct bpf_load_program_attr attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure basic loading works */

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
			__func__, cp, errno);
		return -errno;
	}
	close(ret);

	/* now try the same program, but with the name */

	attr.name = "test";
	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.name = 1;
		close(ret);
	}

	return 0;
}

static int
bpf_object__probe_global_data(struct bpf_object *obj)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, errno);
		return -errno;
	}

	insns[0].imm = map;

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
	if (ret >= 0) {
		obj->caps.global_data = 1;
		close(ret);
	}

	close(map);
	return 0;
}

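/*
 * Added note: the probe above is effectively "*(u64 *)(map_value + 16) = 42"
 * against a single-entry, 32-byte array value; kernels that verify such
 * direct map-value accesses support global data sections.
 */
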
static int bpf_object__probe_btf_func(struct bpf_object *obj)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_func = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};
	int btf_fd;

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
				      strs, sizeof(strs));
	if (btf_fd >= 0) {
		obj->caps.btf_datasec = 1;
		close(btf_fd);
		return 1;
	}

	return 0;
}

static int bpf_object__probe_array_mmap(struct bpf_object *obj)
{
	struct bpf_create_map_attr attr = {
		.map_type = BPF_MAP_TYPE_ARRAY,
		.map_flags = BPF_F_MMAPABLE,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = 1,
	};
	int fd;

	fd = bpf_create_map_xattr(&attr);
	if (fd >= 0) {
		obj->caps.array_mmap = 1;
		close(fd);
		return 1;
	}

	return 0;
}

static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	int (*probe_fn[])(struct bpf_object *obj) = {
		bpf_object__probe_name,
		bpf_object__probe_global_data,
		bpf_object__probe_btf_func,
		bpf_object__probe_btf_datasec,
		bpf_object__probe_array_mmap,
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
		ret = probe_fn[i](obj);
		if (ret < 0)
			pr_debug("Probe #%d failed with %d.\n", i, ret);
	}

	return 0;
}

static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
	struct bpf_map_info map_info = {};
	char msg[STRERR_BUFSIZE];
	__u32 map_info_len;

	map_info_len = sizeof(map_info);

	if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
		return false;
	}

	return (map_info.type == map->def.type &&
		map_info.key_size == map->def.key_size &&
		map_info.value_size == map->def.value_size &&
		map_info.max_entries == map->def.max_entries &&
		map_info.map_flags == map->def.map_flags);
}

2825static int
2826bpf_object__reuse_map(struct bpf_map *map)
2827{
2828 char *cp, errmsg[STRERR_BUFSIZE];
2829 int err, pin_fd;
2830
2831 pin_fd = bpf_obj_get(map->pin_path);
2832 if (pin_fd < 0) {
2833 err = -errno;
2834 if (err == -ENOENT) {
2835 pr_debug("found no pinned map to reuse at '%s'\n",
2836 map->pin_path);
2837 return 0;
2838 }
2839
2840 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
2841 pr_warn("couldn't retrieve pinned map '%s': %s\n",
2842 map->pin_path, cp);
2843 return err;
2844 }
2845
2846 if (!map_is_reuse_compat(map, pin_fd)) {
2847 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
2848 map->pin_path);
2849 close(pin_fd);
2850 return -EINVAL;
2851 }
2852
2853 err = bpf_map__reuse_fd(map, pin_fd);
2854 if (err) {
2855 close(pin_fd);
2856 return err;
2857 }
2858 map->pinned = true;
2859 pr_debug("reused pinned map at '%s'\n", map->pin_path);
2860
2861 return 0;
2862}
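/*
 * Editor's sketch (not part of libbpf, names hypothetical): typical
 * application flow exercising the reuse path above. On the first run the
 * map is created and pinned; on later runs the pinned map is reused if
 * map_is_reuse_compat() accepts it:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	bpf_object__load(obj);	// reuses /sys/fs/bpf/my_map if present
 */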
2863
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002864static int
Daniel Borkmannd8599002019-04-09 23:20:13 +02002865bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
2866{
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002867 enum libbpf_map_type map_type = map->libbpf_type;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002868 char *cp, errmsg[STRERR_BUFSIZE];
2869 int err, zero = 0;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002870
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002871	/* the kernel already zero-initializes the .bss map */
2872 if (map_type == LIBBPF_MAP_BSS)
Daniel Borkmannd8599002019-04-09 23:20:13 +02002873 return 0;
2874
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08002875 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
2876 if (err) {
2877 err = -errno;
2878 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2879 pr_warn("Error setting initial map(%s) contents: %s\n",
2880 map->name, cp);
2881 return err;
2882 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02002883
Andrii Nakryiko166750b2019-12-13 17:47:08 -08002884	/* Freeze .rodata and .extern maps as read-only from the syscall side. */
2885 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_EXTERN) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02002886 err = bpf_map_freeze(map->fd);
2887 if (err) {
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08002888 err = -errno;
2889 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002890 pr_warn("Error freezing map(%s) as read-only: %s\n",
2891 map->name, cp);
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08002892 return err;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002893 }
2894 }
Andrii Nakryikoeba9c5f2019-12-13 17:43:33 -08002895 return 0;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002896}
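/*
 * Editor's note (illustrative; 'frozen_fd' is hypothetical): once a map is
 * frozen, user-space writes are rejected while BPF-side reads keep working:
 *
 *	int key = 0, val = 1;
 *
 *	err = bpf_map_update_elem(frozen_fd, &key, &val, 0);
 *	// fails with -1, errno == EPERM: map is frozen
 */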
2897
2898static int
Wang Nan52d33522015-07-01 02:14:04 +00002899bpf_object__create_maps(struct bpf_object *obj)
2900{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002901 struct bpf_create_map_attr create_attr = {};
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002902 int nr_cpus = 0;
Wang Nan52d33522015-07-01 02:14:04 +00002903 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002904 int err;
Wang Nan52d33522015-07-01 02:14:04 +00002905
Wang Nan9d759a92015-11-27 08:47:35 +00002906 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002907 struct bpf_map *map = &obj->maps[i];
2908 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002909 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002910 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00002911
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002912 if (map->pin_path) {
2913 err = bpf_object__reuse_map(map);
2914 if (err) {
2915 pr_warn("error reusing pinned map %s\n",
2916 map->name);
2917 return err;
2918 }
2919 }
2920
Jakub Kicinski26736eb2018-07-10 14:43:06 -07002921 if (map->fd >= 0) {
2922 pr_debug("skip map create (preset) %s: fd=%d\n",
2923 map->name, map->fd);
2924 continue;
2925 }
2926
Stanislav Fomichev94cb3102018-11-20 17:11:20 -08002927 if (obj->caps.name)
2928 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07002929 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002930 create_attr.map_type = def->type;
2931 create_attr.map_flags = def->map_flags;
2932 create_attr.key_size = def->key_size;
2933 create_attr.value_size = def->value_size;
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002934 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
2935 !def->max_entries) {
2936 if (!nr_cpus)
2937 nr_cpus = libbpf_num_possible_cpus();
2938 if (nr_cpus < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002939 pr_warn("failed to determine number of system CPUs: %d\n",
2940 nr_cpus);
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002941 err = nr_cpus;
2942 goto err_out;
2943 }
2944 pr_debug("map '%s': setting size to %d\n",
2945 map->name, nr_cpus);
2946 create_attr.max_entries = nr_cpus;
2947 } else {
2948 create_attr.max_entries = def->max_entries;
2949 }
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002950 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002951 create_attr.btf_key_type_id = 0;
2952 create_attr.btf_value_type_id = 0;
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08002953 if (bpf_map_type__is_map_in_map(def->type) &&
2954 map->inner_map_fd >= 0)
2955 create_attr.inner_map_fd = map->inner_map_fd;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002956
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002957 if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002958 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002959 create_attr.btf_key_type_id = map->btf_key_type_id;
2960 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002961 }
2962
2963 *pfd = bpf_create_map_xattr(&create_attr);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002964 if (*pfd < 0 && (create_attr.btf_key_type_id ||
2965 create_attr.btf_value_type_id)) {
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002966 err = -errno;
2967 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002968 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
2969 map->name, cp, err);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002970 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002971 create_attr.btf_key_type_id = 0;
2972 create_attr.btf_value_type_id = 0;
2973 map->btf_key_type_id = 0;
2974 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002975 *pfd = bpf_create_map_xattr(&create_attr);
2976 }
2977
Wang Nan52d33522015-07-01 02:14:04 +00002978 if (*pfd < 0) {
2979 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00002980
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002981 err = -errno;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002982err_out:
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002983 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002984 pr_warn("failed to create map (name: '%s'): %s(%d)\n",
2985 map->name, cp, err);
Wang Nan52d33522015-07-01 02:14:04 +00002986 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00002987 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00002988 return err;
2989 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02002990
2991 if (bpf_map__is_internal(map)) {
2992 err = bpf_object__populate_internal_map(obj, map);
2993 if (err < 0) {
2994 zclose(*pfd);
2995 goto err_out;
2996 }
2997 }
2998
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002999 if (map->pin_path && !map->pinned) {
3000 err = bpf_map__pin(map, NULL);
3001 if (err) {
3002 pr_warn("failed to auto-pin map name '%s' at '%s'\n",
3003 map->name, map->pin_path);
3004 return err;
3005 }
3006 }
3007
Andrii Nakryiko76e10222019-05-29 10:36:10 -07003008 pr_debug("created map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00003009 }
3010
Wang Nan52d33522015-07-01 02:14:04 +00003011 return 0;
3012}
3013
Wang Nan8a47a6c2015-07-01 02:14:05 +00003014static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003015check_btf_ext_reloc_err(struct bpf_program *prog, int err,
3016 void *btf_prog_info, const char *info_name)
3017{
3018 if (err != -ENOENT) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003019 pr_warn("Error in loading %s for sec %s.\n",
3020 info_name, prog->section_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003021 return err;
3022 }
3023
3024 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
3025
3026 if (btf_prog_info) {
3027 /*
 3028		 * Some info has already been found, but the last
Andrii Nakryiko399dc652019-05-29 10:36:11 -07003029		 * btf_ext reloc failed, so we must error out.
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003030 */
Kefeng Wangbe180102019-10-21 13:55:32 +08003031 pr_warn("Error in relocating %s for sec %s.\n",
3032 info_name, prog->section_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003033 return err;
3034 }
3035
Andrii Nakryiko399dc652019-05-29 10:36:11 -07003036	/* Failed to load the very first info. Ignore the rest. */
Kefeng Wangbe180102019-10-21 13:55:32 +08003037 pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
3038 info_name, prog->section_name, info_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003039 return 0;
3040}
3041
3042static int
3043bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
3044 const char *section_name, __u32 insn_offset)
3045{
3046 int err;
3047
3048 if (!insn_offset || prog->func_info) {
3049 /*
3050 * !insn_offset => main program
3051 *
3052 * For sub prog, the main program's func_info has to
3053 * be loaded first (i.e. prog->func_info != NULL)
3054 */
3055 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
3056 section_name, insn_offset,
3057 &prog->func_info,
3058 &prog->func_info_cnt);
3059 if (err)
3060 return check_btf_ext_reloc_err(prog, err,
3061 prog->func_info,
3062 "bpf_func_info");
3063
3064 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
3065 }
3066
Martin KaFai Lau3d650142018-12-07 16:42:31 -08003067 if (!insn_offset || prog->line_info) {
3068 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
3069 section_name, insn_offset,
3070 &prog->line_info,
3071 &prog->line_info_cnt);
3072 if (err)
3073 return check_btf_ext_reloc_err(prog, err,
3074 prog->line_info,
3075 "bpf_line_info");
3076
3077 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
3078 }
3079
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003080 return 0;
3081}
3082
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003083#define BPF_CORE_SPEC_MAX_LEN 64
3084
3085/* represents BPF CO-RE field or array element accessor */
3086struct bpf_core_accessor {
3087 __u32 type_id; /* struct/union type or array element type */
3088 __u32 idx; /* field index or array index */
3089 const char *name; /* field name or NULL for array accessor */
3090};
3091
3092struct bpf_core_spec {
3093 const struct btf *btf;
3094 /* high-level spec: named fields and array indices only */
3095 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
3096 /* high-level spec length */
3097 int len;
3098 /* raw, low-level spec: 1-to-1 with accessor spec string */
3099 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
3100 /* raw spec length */
3101 int raw_len;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003102 /* field bit offset represented by spec */
3103 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003104};
3105
3106static bool str_is_empty(const char *s)
3107{
3108 return !s || !s[0];
3109}
3110
3111/*
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003112 * Turn bpf_field_reloc into a low- and high-level spec representation,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003113 * validating correctness along the way, as well as calculating resulting
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003114 * field bit offset, specified by accessor string. Low-level spec captures
3115 * every single level of nestedness, including traversing anonymous
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003116 * struct/union members. High-level one only captures semantically meaningful
3117 * "turning points": named fields and array indicies.
3118 * E.g., for this case:
3119 *
3120 * struct sample {
3121 * int __unimportant;
3122 * struct {
3123 * int __1;
3124 * int __2;
3125 * int a[7];
3126 * };
3127 * };
3128 *
3129 * struct sample *s = ...;
3130 *
 3131 *   int *x = &s->a[3]; // access string = '0:1:2:3'
3132 *
3133 * Low-level spec has 1:1 mapping with each element of access string (it's
3134 * just a parsed access string representation): [0, 1, 2, 3].
3135 *
3136 * High-level spec will capture only 3 points:
 3137 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
3138 * - field 'a' access (corresponds to '2' in low-level spec);
3139 * - array element #3 access (corresponds to '3' in low-level spec).
3140 *
3141 */
3142static int bpf_core_spec_parse(const struct btf *btf,
3143 __u32 type_id,
3144 const char *spec_str,
3145 struct bpf_core_spec *spec)
3146{
3147 int access_idx, parsed_len, i;
3148 const struct btf_type *t;
3149 const char *name;
3150 __u32 id;
3151 __s64 sz;
3152
3153 if (str_is_empty(spec_str) || *spec_str == ':')
3154 return -EINVAL;
3155
3156 memset(spec, 0, sizeof(*spec));
3157 spec->btf = btf;
3158
3159 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
3160 while (*spec_str) {
3161 if (*spec_str == ':')
3162 ++spec_str;
3163 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
3164 return -EINVAL;
3165 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
3166 return -E2BIG;
3167 spec_str += parsed_len;
3168 spec->raw_spec[spec->raw_len++] = access_idx;
3169 }
3170
3171 if (spec->raw_len == 0)
3172 return -EINVAL;
3173
3174 /* first spec value is always reloc type array index */
3175 t = skip_mods_and_typedefs(btf, type_id, &id);
3176 if (!t)
3177 return -EINVAL;
3178
3179 access_idx = spec->raw_spec[0];
3180 spec->spec[0].type_id = id;
3181 spec->spec[0].idx = access_idx;
3182 spec->len++;
3183
3184 sz = btf__resolve_size(btf, id);
3185 if (sz < 0)
3186 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003187 spec->bit_offset = access_idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003188
3189 for (i = 1; i < spec->raw_len; i++) {
3190 t = skip_mods_and_typedefs(btf, id, &id);
3191 if (!t)
3192 return -EINVAL;
3193
3194 access_idx = spec->raw_spec[i];
3195
3196 if (btf_is_composite(t)) {
3197 const struct btf_member *m;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003198 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003199
3200 if (access_idx >= btf_vlen(t))
3201 return -EINVAL;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003202
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003203 bit_offset = btf_member_bit_offset(t, access_idx);
3204 spec->bit_offset += bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003205
3206 m = btf_members(t) + access_idx;
3207 if (m->name_off) {
3208 name = btf__name_by_offset(btf, m->name_off);
3209 if (str_is_empty(name))
3210 return -EINVAL;
3211
3212 spec->spec[spec->len].type_id = id;
3213 spec->spec[spec->len].idx = access_idx;
3214 spec->spec[spec->len].name = name;
3215 spec->len++;
3216 }
3217
3218 id = m->type;
3219 } else if (btf_is_array(t)) {
3220 const struct btf_array *a = btf_array(t);
3221
3222 t = skip_mods_and_typedefs(btf, a->type, &id);
3223 if (!t || access_idx >= a->nelems)
3224 return -EINVAL;
3225
3226 spec->spec[spec->len].type_id = id;
3227 spec->spec[spec->len].idx = access_idx;
3228 spec->len++;
3229
3230 sz = btf__resolve_size(btf, id);
3231 if (sz < 0)
3232 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003233 spec->bit_offset += access_idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003234 } else {
Kefeng Wangbe180102019-10-21 13:55:32 +08003235 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
3236 type_id, spec_str, i, id, btf_kind(t));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003237 return -EINVAL;
3238 }
3239 }
3240
3241 return 0;
3242}
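/*
 * Worked example (editor's sketch) for 'struct sample' in the comment
 * above: parsing spec_str "0:1:2:3" against 'struct sample *' produces
 *	raw_spec = [0, 1, 2, 3], raw_len = 4;
 *	spec     = [ptr idx 0, field 'a', array idx 3], len = 3;
 * and, assuming 4-byte ints and no extra padding,
 *	bit_offset = 0 * sizeof(struct sample) * 8	// initial &s[0]
 *		   + 4 * 8				// anon struct at byte 4
 *		   + 8 * 8				// 'a' at byte 8 within it
 *		   + 3 * 4 * 8				// element a[3]
 *		   = 192, i.e. byte offset 24.
 */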
3243
3244static bool bpf_core_is_flavor_sep(const char *s)
3245{
3246 /* check X___Y name pattern, where X and Y are not underscores */
3247 return s[0] != '_' && /* X */
3248 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
3249 s[4] != '_'; /* Y */
3250}
3251
3252/* Given 'some_struct_name___with_flavor' return the length of a name prefix
3253 * before last triple underscore. Struct name part after last triple
3254 * underscore is ignored by BPF CO-RE relocation during relocation matching.
3255 */
3256static size_t bpf_core_essential_name_len(const char *name)
3257{
3258 size_t n = strlen(name);
3259 int i;
3260
3261 for (i = n - 5; i >= 0; i--) {
3262 if (bpf_core_is_flavor_sep(name + i))
3263 return i + 1;
3264 }
3265 return n;
3266}
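/*
 * For example (hypothetical names): bpf_core_essential_name_len("sk_buff")
 * is 7 (no flavor separator), while
 * bpf_core_essential_name_len("task_struct___v5_4") is 11, the length of
 * "task_struct".
 */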
3267
3268/* dynamically sized list of type IDs */
3269struct ids_vec {
3270 __u32 *data;
3271 int len;
3272};
3273
3274static void bpf_core_free_cands(struct ids_vec *cand_ids)
3275{
3276 free(cand_ids->data);
3277 free(cand_ids);
3278}
3279
3280static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
3281 __u32 local_type_id,
3282 const struct btf *targ_btf)
3283{
3284 size_t local_essent_len, targ_essent_len;
3285 const char *local_name, *targ_name;
3286 const struct btf_type *t;
3287 struct ids_vec *cand_ids;
3288 __u32 *new_ids;
3289 int i, err, n;
3290
3291 t = btf__type_by_id(local_btf, local_type_id);
3292 if (!t)
3293 return ERR_PTR(-EINVAL);
3294
3295 local_name = btf__name_by_offset(local_btf, t->name_off);
3296 if (str_is_empty(local_name))
3297 return ERR_PTR(-EINVAL);
3298 local_essent_len = bpf_core_essential_name_len(local_name);
3299
3300 cand_ids = calloc(1, sizeof(*cand_ids));
3301 if (!cand_ids)
3302 return ERR_PTR(-ENOMEM);
3303
3304 n = btf__get_nr_types(targ_btf);
3305 for (i = 1; i <= n; i++) {
3306 t = btf__type_by_id(targ_btf, i);
3307 targ_name = btf__name_by_offset(targ_btf, t->name_off);
3308 if (str_is_empty(targ_name))
3309 continue;
3310
3311 targ_essent_len = bpf_core_essential_name_len(targ_name);
3312 if (targ_essent_len != local_essent_len)
3313 continue;
3314
3315 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
3316 pr_debug("[%d] %s: found candidate [%d] %s\n",
3317 local_type_id, local_name, i, targ_name);
 3318			new_ids = reallocarray(cand_ids->data, cand_ids->len + 1,
 					       sizeof(*cand_ids->data));
3319 if (!new_ids) {
3320 err = -ENOMEM;
3321 goto err_out;
3322 }
3323 cand_ids->data = new_ids;
3324 cand_ids->data[cand_ids->len++] = i;
3325 }
3326 }
3327 return cand_ids;
3328err_out:
3329 bpf_core_free_cands(cand_ids);
3330 return ERR_PTR(err);
3331}
3332
3333/* Check two types for compatibility, skipping const/volatile/restrict and
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003334 * typedefs, to ensure we are relocating compatible entities:
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003335 * - any two STRUCTs/UNIONs are compatible and can be mixed;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003336 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003337 * - any two PTRs are always compatible;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003338 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 3339 *     least one of the enums should be anonymous; sizes are ignored;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003341 * - for INT, size and signedness are ignored;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003342 * - for ARRAY, dimensionality is ignored, element types are checked for
3343 * compatibility recursively;
 3344 *   - everything else shouldn't ever be a target of a relocation.
3345 * These rules are not set in stone and probably will be adjusted as we get
3346 * more experience with using BPF CO-RE relocations.
3347 */
3348static int bpf_core_fields_are_compat(const struct btf *local_btf,
3349 __u32 local_id,
3350 const struct btf *targ_btf,
3351 __u32 targ_id)
3352{
3353 const struct btf_type *local_type, *targ_type;
3354
3355recur:
3356 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
3357 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
3358 if (!local_type || !targ_type)
3359 return -EINVAL;
3360
3361 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
3362 return 1;
3363 if (btf_kind(local_type) != btf_kind(targ_type))
3364 return 0;
3365
3366 switch (btf_kind(local_type)) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003367 case BTF_KIND_PTR:
3368 return 1;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003369 case BTF_KIND_FWD:
3370 case BTF_KIND_ENUM: {
3371 const char *local_name, *targ_name;
3372 size_t local_len, targ_len;
3373
3374 local_name = btf__name_by_offset(local_btf,
3375 local_type->name_off);
3376 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
3377 local_len = bpf_core_essential_name_len(local_name);
3378 targ_len = bpf_core_essential_name_len(targ_name);
3379 /* one of them is anonymous or both w/ same flavor-less names */
3380 return local_len == 0 || targ_len == 0 ||
3381 (local_len == targ_len &&
3382 strncmp(local_name, targ_name, local_len) == 0);
3383 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003384 case BTF_KIND_INT:
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003385 /* just reject deprecated bitfield-like integers; all other
 3386		 * integers are by default compatible with each other
3387 */
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003388 return btf_int_offset(local_type) == 0 &&
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003389 btf_int_offset(targ_type) == 0;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003390 case BTF_KIND_ARRAY:
3391 local_id = btf_array(local_type)->type;
3392 targ_id = btf_array(targ_type)->type;
3393 goto recur;
3394 default:
Kefeng Wangbe180102019-10-21 13:55:32 +08003395 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
3396 btf_kind(local_type), local_id, targ_id);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003397 return 0;
3398 }
3399}
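/*
 * Editor's illustration (hypothetical fields) of the rules above: a local
 * 'int cnt' matches a target 'long cnt' (INT sizes are ignored), a local
 * 'int a[4]' matches a target 'short a[8]' (ARRAY dimensions are ignored
 * and the element INTs are compatible), and any two pointers match; but a
 * local 'int x' never matches a target 'struct x' (kind mismatch).
 */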
3400
3401/*
3402 * Given single high-level named field accessor in local type, find
3403 * corresponding high-level accessor for a target type. Along the way,
3404 * maintain low-level spec for target as well. Also keep updating target
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003405 * bit offset.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003406 *
3407 * Searching is performed through recursive exhaustive enumeration of all
3408 * fields of a struct/union. If there are any anonymous (embedded)
3409 * structs/unions, they are recursively searched as well. If field with
3410 * desired name is found, check compatibility between local and target types,
3411 * before returning result.
3412 *
3413 * 1 is returned, if field is found.
3414 * 0 is returned if no compatible field is found.
3415 * <0 is returned on error.
3416 */
3417static int bpf_core_match_member(const struct btf *local_btf,
3418 const struct bpf_core_accessor *local_acc,
3419 const struct btf *targ_btf,
3420 __u32 targ_id,
3421 struct bpf_core_spec *spec,
3422 __u32 *next_targ_id)
3423{
3424 const struct btf_type *local_type, *targ_type;
3425 const struct btf_member *local_member, *m;
3426 const char *local_name, *targ_name;
3427 __u32 local_id;
3428 int i, n, found;
3429
3430 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
3431 if (!targ_type)
3432 return -EINVAL;
3433 if (!btf_is_composite(targ_type))
3434 return 0;
3435
3436 local_id = local_acc->type_id;
3437 local_type = btf__type_by_id(local_btf, local_id);
3438 local_member = btf_members(local_type) + local_acc->idx;
3439 local_name = btf__name_by_offset(local_btf, local_member->name_off);
3440
3441 n = btf_vlen(targ_type);
3442 m = btf_members(targ_type);
3443 for (i = 0; i < n; i++, m++) {
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003444 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003445
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003446 bit_offset = btf_member_bit_offset(targ_type, i);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003447
3448 /* too deep struct/union/array nesting */
3449 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
3450 return -E2BIG;
3451
 3452		/* speculate that this member is the one we are looking for */
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003453 spec->bit_offset += bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003454 spec->raw_spec[spec->raw_len++] = i;
3455
3456 targ_name = btf__name_by_offset(targ_btf, m->name_off);
3457 if (str_is_empty(targ_name)) {
3458 /* embedded struct/union, we need to go deeper */
3459 found = bpf_core_match_member(local_btf, local_acc,
3460 targ_btf, m->type,
3461 spec, next_targ_id);
3462 if (found) /* either found or error */
3463 return found;
3464 } else if (strcmp(local_name, targ_name) == 0) {
3465 /* matching named field */
3466 struct bpf_core_accessor *targ_acc;
3467
3468 targ_acc = &spec->spec[spec->len++];
3469 targ_acc->type_id = targ_id;
3470 targ_acc->idx = i;
3471 targ_acc->name = targ_name;
3472
3473 *next_targ_id = m->type;
3474 found = bpf_core_fields_are_compat(local_btf,
3475 local_member->type,
3476 targ_btf, m->type);
3477 if (!found)
3478 spec->len--; /* pop accessor */
3479 return found;
3480 }
 3481		/* member turned out not to be what we were looking for */
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003482 spec->bit_offset -= bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003483 spec->raw_len--;
3484 }
3485
3486 return 0;
3487}
3488
3489/*
3490 * Try to match local spec to a target type and, if successful, produce full
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003491 * target spec (high-level, low-level + bit offset).
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003492 */
3493static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
3494 const struct btf *targ_btf, __u32 targ_id,
3495 struct bpf_core_spec *targ_spec)
3496{
3497 const struct btf_type *targ_type;
3498 const struct bpf_core_accessor *local_acc;
3499 struct bpf_core_accessor *targ_acc;
3500 int i, sz, matched;
3501
3502 memset(targ_spec, 0, sizeof(*targ_spec));
3503 targ_spec->btf = targ_btf;
3504
3505 local_acc = &local_spec->spec[0];
3506 targ_acc = &targ_spec->spec[0];
3507
3508 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
3509 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
3510 &targ_id);
3511 if (!targ_type)
3512 return -EINVAL;
3513
3514 if (local_acc->name) {
3515 matched = bpf_core_match_member(local_spec->btf,
3516 local_acc,
3517 targ_btf, targ_id,
3518 targ_spec, &targ_id);
3519 if (matched <= 0)
3520 return matched;
3521 } else {
 3522			/* for i=0, targ_id is already treated as the array element
 3523			 * type (because it's the original struct); for others,
 3524			 * we should find the array element type first
3525 */
3526 if (i > 0) {
3527 const struct btf_array *a;
3528
3529 if (!btf_is_array(targ_type))
3530 return 0;
3531
3532 a = btf_array(targ_type);
3533 if (local_acc->idx >= a->nelems)
3534 return 0;
3535 if (!skip_mods_and_typedefs(targ_btf, a->type,
3536 &targ_id))
3537 return -EINVAL;
3538 }
3539
3540 /* too deep struct/union/array nesting */
3541 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
3542 return -E2BIG;
3543
3544 targ_acc->type_id = targ_id;
3545 targ_acc->idx = local_acc->idx;
3546 targ_acc->name = NULL;
3547 targ_spec->len++;
3548 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
3549 targ_spec->raw_len++;
3550
3551 sz = btf__resolve_size(targ_btf, targ_id);
3552 if (sz < 0)
3553 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003554 targ_spec->bit_offset += local_acc->idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003555 }
3556 }
3557
3558 return 1;
3559}
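/*
 * Editor's illustration (hypothetical layout): a local spec for 's.a[3]'
 * still matches a target where 'a' moved into a nested anonymous struct,
 * e.g. 'struct s { struct { int a[7]; }; };'. The target raw spec grows an
 * extra step for the anonymous member, but the high-level spec (named
 * field 'a', then index 3) and the target bit_offset are recomputed
 * independently from the target type's own layout.
 */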
3560
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003561static int bpf_core_calc_field_relo(const struct bpf_program *prog,
3562 const struct bpf_field_reloc *relo,
3563 const struct bpf_core_spec *spec,
3564 __u32 *val, bool *validate)
3565{
3566 const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
3567 const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
3568 __u32 byte_off, byte_sz, bit_off, bit_sz;
3569 const struct btf_member *m;
3570 const struct btf_type *mt;
3571 bool bitfield;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003572 __s64 sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003573
3574 /* a[n] accessor needs special handling */
3575 if (!acc->name) {
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003576 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
3577 *val = spec->bit_offset / 8;
3578 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
3579 sz = btf__resolve_size(spec->btf, acc->type_id);
3580 if (sz < 0)
3581 return -EINVAL;
3582 *val = sz;
3583 } else {
3584 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003585 bpf_program__title(prog, false),
3586 relo->kind, relo->insn_off / 8);
3587 return -EINVAL;
3588 }
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003589 if (validate)
3590 *validate = true;
3591 return 0;
3592 }
3593
3594 m = btf_members(t) + acc->idx;
3595 mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
3596 bit_off = spec->bit_offset;
3597 bit_sz = btf_member_bitfield_size(t, acc->idx);
3598
3599 bitfield = bit_sz > 0;
3600 if (bitfield) {
3601 byte_sz = mt->size;
3602 byte_off = bit_off / 8 / byte_sz * byte_sz;
3603 /* figure out smallest int size necessary for bitfield load */
3604 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
3605 if (byte_sz >= 8) {
3606 /* bitfield can't be read with 64-bit read */
3607 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
3608 bpf_program__title(prog, false),
3609 relo->kind, relo->insn_off / 8);
3610 return -E2BIG;
3611 }
3612 byte_sz *= 2;
3613 byte_off = bit_off / 8 / byte_sz * byte_sz;
3614 }
3615 } else {
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07003616 sz = btf__resolve_size(spec->btf, m->type);
3617 if (sz < 0)
3618 return -EINVAL;
3619 byte_sz = sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003620 byte_off = spec->bit_offset / 8;
3621 bit_sz = byte_sz * 8;
3622 }
3623
3624 /* for bitfields, all the relocatable aspects are ambiguous and we
 3625	 * might disagree with the compiler, so turn off validation of expected
3626 * value, except for signedness
3627 */
3628 if (validate)
3629 *validate = !bitfield;
3630
3631 switch (relo->kind) {
3632 case BPF_FIELD_BYTE_OFFSET:
3633 *val = byte_off;
3634 break;
3635 case BPF_FIELD_BYTE_SIZE:
3636 *val = byte_sz;
3637 break;
3638 case BPF_FIELD_SIGNED:
3639 /* enums will be assumed unsigned */
3640 *val = btf_is_enum(mt) ||
3641 (btf_int_encoding(mt) & BTF_INT_SIGNED);
3642 if (validate)
3643 *validate = true; /* signedness is never ambiguous */
3644 break;
3645 case BPF_FIELD_LSHIFT_U64:
3646#if __BYTE_ORDER == __LITTLE_ENDIAN
3647 *val = 64 - (bit_off + bit_sz - byte_off * 8);
3648#else
3649 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
3650#endif
3651 break;
3652 case BPF_FIELD_RSHIFT_U64:
3653 *val = 64 - bit_sz;
3654 if (validate)
3655 *validate = true; /* right shift is never ambiguous */
3656 break;
3657 case BPF_FIELD_EXISTS:
3658 default:
3659 pr_warn("prog '%s': unknown relo %d at insn #%d\n",
3660 bpf_program__title(prog, false),
3661 relo->kind, relo->insn_off / 8);
3662 return -EINVAL;
3663 }
3664
3665 return 0;
3666}
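/*
 * Worked bitfield example (editor's sketch, hypothetical struct): for
 *
 *	struct s { unsigned short pad:2, x:5; };	// 'x' is bits [2, 6]
 *
 * accessing 'x' gives bit_off = 2, bit_sz = 5, mt->size = 2, so
 * byte_sz = 2 and byte_off = 2 / 8 / 2 * 2 = 0 (2 + 5 - 0 <= 16, the
 * field fits in a single 16-bit load). The relocation values become:
 *	BPF_FIELD_BYTE_OFFSET = 0, BPF_FIELD_BYTE_SIZE = 2,
 *	BPF_FIELD_LSHIFT_U64  = 64 - (2 + 5 - 0) = 57 (little-endian),
 *	BPF_FIELD_RSHIFT_U64  = 64 - 5 = 59,
 * so a reader loads 2 bytes at offset 0 into a u64, shifts left by 57
 * (bit 6 lands in bit 63), then right by 59, leaving the 5-bit value.
 */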
3667
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003668/*
3669 * Patch relocatable BPF instruction.
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003670 *
3671 * Patched value is determined by relocation kind and target specification.
 3672 * For a field existence relocation, the target spec will be NULL if the
 3673 * field is not found.
3674 * Expected insn->imm value is determined using relocation kind and local
3675 * spec, and is checked before patching instruction. If actual insn->imm value
3676 * is wrong, bail out with error.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003677 *
 3678 * Currently two kinds of BPF instructions are supported:
3679 * 1. rX = <imm> (assignment with immediate operand);
3680 * 2. rX += <imm> (arithmetic operations with immediate operand);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003681 */
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003682static int bpf_core_reloc_insn(struct bpf_program *prog,
3683 const struct bpf_field_reloc *relo,
3684 const struct bpf_core_spec *local_spec,
3685 const struct bpf_core_spec *targ_spec)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003686{
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003687 bool failed = false, validate = true;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003688 __u32 orig_val, new_val;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003689 struct bpf_insn *insn;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003690 int insn_idx, err;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003691 __u8 class;
3692
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003693 if (relo->insn_off % sizeof(struct bpf_insn))
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003694 return -EINVAL;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003695 insn_idx = relo->insn_off / sizeof(struct bpf_insn);
3696
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003697 if (relo->kind == BPF_FIELD_EXISTS) {
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003698 orig_val = 1; /* can't generate EXISTS relo w/o local field */
3699 new_val = targ_spec ? 1 : 0;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003700 } else if (!targ_spec) {
3701 failed = true;
3702 new_val = (__u32)-1;
3703 } else {
3704 err = bpf_core_calc_field_relo(prog, relo, local_spec,
3705 &orig_val, &validate);
3706 if (err)
3707 return err;
3708 err = bpf_core_calc_field_relo(prog, relo, targ_spec,
3709 &new_val, NULL);
3710 if (err)
3711 return err;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003712 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003713
3714 insn = &prog->insns[insn_idx];
3715 class = BPF_CLASS(insn->code);
3716
3717 if (class == BPF_ALU || class == BPF_ALU64) {
3718 if (BPF_SRC(insn->code) != BPF_K)
3719 return -EINVAL;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003720 if (!failed && validate && insn->imm != orig_val) {
3721 pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
3722 bpf_program__title(prog, false), insn_idx,
3723 insn->imm, orig_val, new_val);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003724 return -EINVAL;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003725 }
3726 orig_val = insn->imm;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003727 insn->imm = new_val;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003728 pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
3729 bpf_program__title(prog, false), insn_idx,
3730 failed ? " w/ failed reloc" : "", orig_val, new_val);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003731 } else {
Kefeng Wangbe180102019-10-21 13:55:32 +08003732 pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
3733 bpf_program__title(prog, false),
3734 insn_idx, insn->code, insn->src_reg, insn->dst_reg,
3735 insn->off, insn->imm);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003736 return -EINVAL;
3737 }
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003738
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003739 return 0;
3740}
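/*
 * Editor's illustration (hypothetical offsets): for a BPF_FIELD_BYTE_OFFSET
 * relocation where a field sits at byte 16 locally but at byte 24 in the
 * target kernel, an instruction compiled as 'r1 += 16' has insn->imm == 16
 * validated against the local spec and then patched in place to 'r1 += 24'.
 */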
3741
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003742static struct btf *btf_load_raw(const char *path)
3743{
3744 struct btf *btf;
3745 size_t read_cnt;
3746 struct stat st;
3747 void *data;
3748 FILE *f;
3749
3750 if (stat(path, &st))
3751 return ERR_PTR(-errno);
3752
3753 data = malloc(st.st_size);
3754 if (!data)
3755 return ERR_PTR(-ENOMEM);
3756
3757 f = fopen(path, "rb");
3758 if (!f) {
3759 btf = ERR_PTR(-errno);
3760 goto cleanup;
3761 }
3762
3763 read_cnt = fread(data, 1, st.st_size, f);
3764 fclose(f);
3765 if (read_cnt < st.st_size) {
3766 btf = ERR_PTR(-EBADF);
3767 goto cleanup;
3768 }
3769
3770 btf = btf__new(data, read_cnt);
3771
3772cleanup:
3773 free(data);
3774 return btf;
3775}
3776
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003777/*
3778 * Probe few well-known locations for vmlinux kernel image and try to load BTF
3779 * data out of it to use for target BTF.
3780 */
3781static struct btf *bpf_core_find_kernel_btf(void)
3782{
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003783 struct {
3784 const char *path_fmt;
3785 bool raw_btf;
3786 } locations[] = {
3787 /* try canonical vmlinux BTF through sysfs first */
3788 { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
3789 /* fall back to trying to find vmlinux ELF on disk otherwise */
3790 { "/boot/vmlinux-%1$s" },
3791 { "/lib/modules/%1$s/vmlinux-%1$s" },
3792 { "/lib/modules/%1$s/build/vmlinux" },
3793 { "/usr/lib/modules/%1$s/kernel/vmlinux" },
3794 { "/usr/lib/debug/boot/vmlinux-%1$s" },
3795 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
3796 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003797 };
3798 char path[PATH_MAX + 1];
3799 struct utsname buf;
3800 struct btf *btf;
3801 int i;
3802
3803 uname(&buf);
3804
3805 for (i = 0; i < ARRAY_SIZE(locations); i++) {
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003806 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003807
3808 if (access(path, R_OK))
3809 continue;
3810
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003811 if (locations[i].raw_btf)
3812 btf = btf_load_raw(path);
3813 else
3814 btf = btf__parse_elf(path, NULL);
3815
3816 pr_debug("loading kernel BTF '%s': %ld\n",
3817 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003818 if (IS_ERR(btf))
3819 continue;
3820
3821 return btf;
3822 }
3823
Kefeng Wangbe180102019-10-21 13:55:32 +08003824 pr_warn("failed to find valid kernel BTF\n");
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003825 return ERR_PTR(-ESRCH);
3826}
3827
3828/* Output spec definition in the format:
 3829 * [<type-id>] <type-name> + <raw-spec> => <byte-off>.<bit-off> @ <spec>,
 3830 * where <spec> is a C-syntax view of recorded field access, e.g.: &x[0].a[3].b
3831 */
3832static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
3833{
3834 const struct btf_type *t;
3835 const char *s;
3836 __u32 type_id;
3837 int i;
3838
3839 type_id = spec->spec[0].type_id;
3840 t = btf__type_by_id(spec->btf, type_id);
3841 s = btf__name_by_offset(spec->btf, t->name_off);
3842 libbpf_print(level, "[%u] %s + ", type_id, s);
3843
3844 for (i = 0; i < spec->raw_len; i++)
3845 libbpf_print(level, "%d%s", spec->raw_spec[i],
3846 i == spec->raw_len - 1 ? " => " : ":");
3847
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003848 libbpf_print(level, "%u.%u @ &x",
3849 spec->bit_offset / 8, spec->bit_offset % 8);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003850
3851 for (i = 0; i < spec->len; i++) {
3852 if (spec->spec[i].name)
3853 libbpf_print(level, ".%s", spec->spec[i].name);
3854 else
3855 libbpf_print(level, "[%u]", spec->spec[i].idx);
3856 }
3857
3858}
3859
3860static size_t bpf_core_hash_fn(const void *key, void *ctx)
3861{
3862 return (size_t)key;
3863}
3864
3865static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
3866{
3867 return k1 == k2;
3868}
3869
3870static void *u32_as_hash_key(__u32 x)
3871{
3872 return (void *)(uintptr_t)x;
3873}
3874
3875/*
3876 * CO-RE relocate single instruction.
3877 *
3878 * The outline and important points of the algorithm:
3879 * 1. For given local type, find corresponding candidate target types.
3880 * Candidate type is a type with the same "essential" name, ignoring
3881 * everything after last triple underscore (___). E.g., `sample`,
3882 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
3883 * for each other. Names with triple underscore are referred to as
3884 * "flavors" and are useful, among other things, to allow to
3885 * specify/support incompatible variations of the same kernel struct, which
3886 * might differ between different kernel versions and/or build
3887 * configurations.
3888 *
3889 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
3890 * converter, when deduplicated BTF of a kernel still contains more than
 3891 *    one distinct type with the same name. In that case, ___2, ___3, etc.
 3892 *    are appended starting from the second name conflict. But struct flavors
 3893 *    are also useful when defined "locally", in a BPF program, to extract the
 3894 *    same data despite incompatible changes between different kernel
3895 * versions/configurations. For instance, to handle field renames between
3896 * kernel versions, one can use two flavors of the struct name with the
3897 * same common name and use conditional relocations to extract that field,
3898 * depending on target kernel version.
3899 * 2. For each candidate type, try to match local specification to this
3900 * candidate target type. Matching involves finding corresponding
3901 * high-level spec accessors, meaning that all named fields should match,
3902 * as well as all array accesses should be within the actual bounds. Also,
3903 * types should be compatible (see bpf_core_fields_are_compat for details).
3904 * 3. It is supported and expected that there might be multiple flavors
3905 * matching the spec. As long as all the specs resolve to the same set of
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003906 * offsets across all candidates, there is no error. If there is any
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003907 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 3908 *    the imperfection of BTF deduplication, which can cause slight duplication of
3909 * the same BTF type, if some directly or indirectly referenced (by
3910 * pointer) type gets resolved to different actual types in different
3911 * object files. If such situation occurs, deduplicated BTF will end up
3912 * with two (or more) structurally identical types, which differ only in
3913 * types they refer to through pointer. This should be OK in most cases and
3914 * is not an error.
3915 * 4. Candidate types search is performed by linearly scanning through all
3916 * types in target BTF. It is anticipated that this is overall more
3917 * efficient memory-wise and not significantly worse (if not better)
3918 * CPU-wise compared to prebuilding a map from all local type names to
3919 * a list of candidate type names. It's also sped up by caching resolved
 3920 *    list of matching candidates for each local "root" type ID that has at
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003921 * least one bpf_field_reloc associated with it. This list is shared
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003922 * between multiple relocations for the same type ID and is updated as some
3923 * of the candidates are pruned due to structural incompatibility.
3924 */
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003925static int bpf_core_reloc_field(struct bpf_program *prog,
3926 const struct bpf_field_reloc *relo,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003927 int relo_idx,
3928 const struct btf *local_btf,
3929 const struct btf *targ_btf,
3930 struct hashmap *cand_cache)
3931{
3932 const char *prog_name = bpf_program__title(prog, false);
3933 struct bpf_core_spec local_spec, cand_spec, targ_spec;
3934 const void *type_key = u32_as_hash_key(relo->type_id);
3935 const struct btf_type *local_type, *cand_type;
3936 const char *local_name, *cand_name;
3937 struct ids_vec *cand_ids;
3938 __u32 local_id, cand_id;
3939 const char *spec_str;
3940 int i, j, err;
3941
3942 local_id = relo->type_id;
3943 local_type = btf__type_by_id(local_btf, local_id);
3944 if (!local_type)
3945 return -EINVAL;
3946
3947 local_name = btf__name_by_offset(local_btf, local_type->name_off);
3948 if (str_is_empty(local_name))
3949 return -EINVAL;
3950
3951 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
3952 if (str_is_empty(spec_str))
3953 return -EINVAL;
3954
3955 err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
3956 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003957 pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
3958 prog_name, relo_idx, local_id, local_name, spec_str,
3959 err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003960 return -EINVAL;
3961 }
3962
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003963 pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
3964 relo->kind);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003965 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
3966 libbpf_print(LIBBPF_DEBUG, "\n");
3967
3968 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
3969 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
3970 if (IS_ERR(cand_ids)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003971			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld\n",
3972 prog_name, relo_idx, local_id, local_name,
3973 PTR_ERR(cand_ids));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003974 return PTR_ERR(cand_ids);
3975 }
3976 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
3977 if (err) {
3978 bpf_core_free_cands(cand_ids);
3979 return err;
3980 }
3981 }
3982
3983 for (i = 0, j = 0; i < cand_ids->len; i++) {
3984 cand_id = cand_ids->data[i];
3985 cand_type = btf__type_by_id(targ_btf, cand_id);
3986 cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
3987
3988 err = bpf_core_spec_match(&local_spec, targ_btf,
3989 cand_id, &cand_spec);
3990 pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
3991 prog_name, relo_idx, i, cand_name);
3992 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
3993 libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
3994 if (err < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003995 pr_warn("prog '%s': relo #%d: matching error: %d\n",
3996 prog_name, relo_idx, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003997 return err;
3998 }
3999 if (err == 0)
4000 continue;
4001
4002 if (j == 0) {
4003 targ_spec = cand_spec;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004004 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004005 /* if there are many candidates, they should all
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004006 * resolve to the same bit offset
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004007 */
Kefeng Wangbe180102019-10-21 13:55:32 +08004008 pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07004009 prog_name, relo_idx, cand_spec.bit_offset,
4010 targ_spec.bit_offset);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004011 return -EINVAL;
4012 }
4013
4014 cand_ids->data[j++] = cand_spec.spec[0].type_id;
4015 }
4016
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004017 /*
4018 * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
4019 * requested, it's expected that we might not find any candidates.
4020 * In this case, if field wasn't found in any candidate, the list of
 4021	 * candidates shouldn't change at all; we'll just handle the relocation
 4022	 * appropriately, depending on the relo's kind.
4023 */
4024 if (j > 0)
4025 cand_ids->len = j;
4026
4027 if (j == 0 && !prog->obj->relaxed_core_relocs &&
4028 relo->kind != BPF_FIELD_EXISTS) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004029 pr_warn("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
4030 prog_name, relo_idx, local_id, local_name, spec_str);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004031 return -ESRCH;
4032 }
4033
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07004034 /* bpf_core_reloc_insn should know how to handle missing targ_spec */
4035 err = bpf_core_reloc_insn(prog, relo, &local_spec,
4036 j ? &targ_spec : NULL);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004037 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004038 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
4039 prog_name, relo_idx, relo->insn_off, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004040 return -EINVAL;
4041 }
4042
4043 return 0;
4044}
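/*
 * Editor's sketch (not part of libbpf): how a BPF program might combine
 * struct flavors with a BPF_FIELD_EXISTS relocation, per the algorithm
 * comment above, to survive a field rename. Struct/field names are
 * hypothetical; bpf_core_field_exists() and BPF_CORE_READ() are assumed
 * to come from bpf_core_read.h.
 *
 *	struct sample___old {
 *		int value;			// name on older kernels
 *	};
 *
 *	struct sample___new {
 *		int value_renamed;		// name on newer kernels
 *	};
 *
 *	static int read_value(void *sample)
 *	{
 *		struct sample___old *o = sample;
 *		struct sample___new *n = sample;
 *
 *		if (bpf_core_field_exists(o->value))	// BPF_FIELD_EXISTS
 *			return BPF_CORE_READ(o, value);
 *		return BPF_CORE_READ(n, value_renamed);
 *	}
 *
 * Both flavors resolve to candidate 'struct sample' in target BTF; only
 * the branch whose field actually exists is taken on a given kernel.
 */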
4045
4046static int
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004047bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004048{
4049 const struct btf_ext_info_sec *sec;
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004050 const struct bpf_field_reloc *rec;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004051 const struct btf_ext_info *seg;
4052 struct hashmap_entry *entry;
4053 struct hashmap *cand_cache = NULL;
4054 struct bpf_program *prog;
4055 struct btf *targ_btf;
4056 const char *sec_name;
4057 int i, err = 0;
4058
4059 if (targ_btf_path)
4060 targ_btf = btf__parse_elf(targ_btf_path, NULL);
4061 else
4062 targ_btf = bpf_core_find_kernel_btf();
4063 if (IS_ERR(targ_btf)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004064 pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004065 return PTR_ERR(targ_btf);
4066 }
4067
4068 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
4069 if (IS_ERR(cand_cache)) {
4070 err = PTR_ERR(cand_cache);
4071 goto out;
4072 }
4073
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004074 seg = &obj->btf_ext->field_reloc_info;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004075 for_each_btf_ext_sec(seg, sec) {
4076 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
4077 if (str_is_empty(sec_name)) {
4078 err = -EINVAL;
4079 goto out;
4080 }
4081 prog = bpf_object__find_program_by_title(obj, sec_name);
4082 if (!prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004083 pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
4084 sec_name);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004085 err = -EINVAL;
4086 goto out;
4087 }
4088
4089 pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
4090 sec_name, sec->num_info);
4091
4092 for_each_btf_ext_rec(seg, sec, i, rec) {
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004093 err = bpf_core_reloc_field(prog, rec, i, obj->btf,
4094 targ_btf, cand_cache);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004095 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004096 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
4097 sec_name, i, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004098 goto out;
4099 }
4100 }
4101 }
4102
4103out:
4104 btf__free(targ_btf);
4105 if (!IS_ERR_OR_NULL(cand_cache)) {
4106 hashmap__for_each_entry(cand_cache, entry, i) {
4107 bpf_core_free_cands(entry->value);
4108 }
4109 hashmap__free(cand_cache);
4110 }
4111 return err;
4112}
4113
4114static int
4115bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
4116{
4117 int err = 0;
4118
Andrii Nakryiko511bb002019-10-15 11:28:45 -07004119 if (obj->btf_ext->field_reloc_info.len)
4120 err = bpf_core_reloc_fields(obj, targ_btf_path);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004121
4122 return err;
4123}
4124
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004125static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004126bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
4127 struct reloc_desc *relo)
4128{
4129 struct bpf_insn *insn, *new_insn;
4130 struct bpf_program *text;
4131 size_t new_cnt;
Yonghong Song2993e052018-11-19 15:29:16 -08004132 int err;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004133
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004134 if (prog->idx == obj->efile.text_shndx) {
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004135 pr_warn("relo in .text insn %d into off %d (insn #%d)\n",
4136 relo->insn_idx, relo->sym_off, relo->sym_off / 8);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004137 return -LIBBPF_ERRNO__RELOC;
4138 }
4139
4140 if (prog->main_prog_cnt == 0) {
4141 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
4142 if (!text) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004143			pr_warn("no .text section found, yet a relocation into .text exists\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004144 return -LIBBPF_ERRNO__RELOC;
4145 }
4146 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07004147 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004148 if (!new_insn) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004149 pr_warn("oom in prog realloc\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004150 return -ENOMEM;
4151 }
Andrii Nakryiko3dc5e052019-11-06 18:08:51 -08004152 prog->insns = new_insn;
Yonghong Song2993e052018-11-19 15:29:16 -08004153
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004154 if (obj->btf_ext) {
4155 err = bpf_program_reloc_btf_ext(prog, obj,
4156 text->section_name,
4157 prog->insns_cnt);
4158 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08004159 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08004160 }
4161
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004162 memcpy(new_insn + prog->insns_cnt, text->insns,
4163 text->insns_cnt * sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004164 prog->main_prog_cnt = prog->insns_cnt;
4165 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00004166 pr_debug("added %zd insn from %s to prog %s\n",
4167 text->insns_cnt, text->section_name,
4168 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004169 }
4170 insn = &prog->insns[relo->insn_idx];
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004171 insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004172 return 0;
4173}
4174
4175static int
Wang Nan9d759a92015-11-27 08:47:35 +00004176bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00004177{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004178 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00004179
Yonghong Song2993e052018-11-19 15:29:16 -08004180 if (!prog)
4181 return 0;
4182
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08004183 if (obj->btf_ext) {
4184 err = bpf_program_reloc_btf_ext(prog, obj,
4185 prog->section_name, 0);
4186 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08004187 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08004188 }
4189
4190 if (!prog->reloc_desc)
Wang Nan8a47a6c2015-07-01 02:14:05 +00004191 return 0;
4192
4193 for (i = 0; i < prog->nr_reloc; i++) {
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004194 struct reloc_desc *relo = &prog->reloc_desc[i];
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004195 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
Wang Nan8a47a6c2015-07-01 02:14:05 +00004196
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004197 if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
4198 pr_warn("relocation out of range: '%s'\n",
4199 prog->section_name);
4200 return -LIBBPF_ERRNO__RELOC;
4201 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00004202
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004203 switch (relo->type) {
4204 case RELO_LD64:
4205 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004206 insn[0].imm = obj->maps[relo->map_idx].fd;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004207 break;
4208 case RELO_DATA:
4209 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
4210 insn[1].imm = insn[0].imm + relo->sym_off;
4211 insn[0].imm = obj->maps[relo->map_idx].fd;
4212 break;
4213 case RELO_EXTERN:
4214 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
4215 insn[0].imm = obj->maps[obj->extern_map_idx].fd;
4216 insn[1].imm = relo->sym_off;
4217 break;
4218 case RELO_CALL:
Andrii Nakryiko53f8dd42019-11-27 12:06:50 -08004219 err = bpf_program__reloc_text(prog, obj, relo);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08004220 if (err)
4221 return err;
Andrii Nakryiko166750b2019-12-13 17:47:08 -08004222 break;
4223 default:
4224 pr_warn("relo #%d: bad relo type %d\n", i, relo->type);
4225 return -EINVAL;
Wang Nan8a47a6c2015-07-01 02:14:05 +00004226 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00004227 }
4228
4229 zfree(&prog->reloc_desc);
4230 prog->nr_reloc = 0;
4231 return 0;
4232}
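/*
 * Editor's illustration (hypothetical numbers): a global variable access
 * compiles to a two-instruction ld_imm64 pair. For RELO_DATA against a
 * data map with fd 5 and a variable at offset 16 within the map value,
 * the patching above sets insn[0].src_reg = BPF_PSEUDO_MAP_VALUE,
 * insn[1].imm = 16 (offset within the value) and insn[0].imm = 5 (map
 * fd); the verifier later converts the pair into the actual map value
 * address.
 */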
4233
Wang Nan8a47a6c2015-07-01 02:14:05 +00004234static int
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004235bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
Wang Nan8a47a6c2015-07-01 02:14:05 +00004236{
4237 struct bpf_program *prog;
4238 size_t i;
4239 int err;
4240
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004241 if (obj->btf_ext) {
4242 err = bpf_object__relocate_core(obj, targ_btf_path);
4243 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004244 pr_warn("failed to perform CO-RE relocations: %d\n",
4245 err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004246 return err;
4247 }
4248 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00004249 for (i = 0; i < obj->nr_programs; i++) {
4250 prog = &obj->programs[i];
4251
Wang Nan9d759a92015-11-27 08:47:35 +00004252 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00004253 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004254 pr_warn("failed to relocate '%s'\n", prog->section_name);
Wang Nan8a47a6c2015-07-01 02:14:05 +00004255 return err;
4256 }
4257 }
4258 return 0;
4259}
4260
Wang Nan34090912015-07-01 02:14:02 +00004261static int bpf_object__collect_reloc(struct bpf_object *obj)
4262{
4263 int i, err;
4264
4265 if (!obj_elf_valid(obj)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004266 pr_warn("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00004267 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00004268 }
4269
Andrii Nakryiko1f8e2bc2019-11-20 23:07:41 -08004270 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
4271 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
4272 Elf_Data *data = obj->efile.reloc_sects[i].data;
Wang Nan34090912015-07-01 02:14:02 +00004273 int idx = shdr->sh_info;
4274 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00004275
4276 if (shdr->sh_type != SHT_REL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004277 pr_warn("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00004278 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00004279 }
4280
4281 prog = bpf_object__find_prog_by_idx(obj, idx);
4282 if (!prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004283 pr_warn("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00004284 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00004285 }
4286
Andrii Nakryiko399dc652019-05-29 10:36:11 -07004287 err = bpf_program__collect_reloc(prog, shdr, data, obj);
Wang Nan34090912015-07-01 02:14:02 +00004288 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00004289 return err;
Wang Nan34090912015-07-01 02:14:02 +00004290 }
4291 return 0;
4292}

static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	int log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf;
	int btf_fd, ret;

	if (!insns || !insns_cnt)
		return -EINVAL;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	if (prog->type == BPF_PROG_TYPE_TRACING) {
		load_attr.attach_prog_fd = prog->attach_prog_fd;
		load_attr.attach_btf_id = prog->attach_btf_id;
	} else {
		load_attr.kern_version = kern_version;
		load_attr.prog_ifindex = prog->prog_ifindex;
	}
	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
	if (prog->obj->btf_ext)
		btf_fd = bpf_object__btf_fd(prog->obj);
	else
		btf_fd = -1;
	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	load_attr.log_level = prog->log_level;
	load_attr.prog_flags = prog->prog_flags;

retry_load:
	log_buf = malloc(log_buf_size);
	if (!log_buf)
		pr_warn("failed to allocate log buffer for BPF loader, continuing without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);

	if (ret >= 0) {
		if (load_attr.log_level)
			pr_debug("verifier log:\n%s", log_buf);
		*pfd = ret;
		ret = 0;
		goto out;
	}

	if (errno == ENOSPC) {
		log_buf_size <<= 1;
		free(log_buf);
		goto retry_load;
	}
	ret = -errno;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warn("failed to load BPF program: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warn("-- BEGIN DUMP LOG --\n");
		pr_warn("\n%s\n", log_buf);
		pr_warn("-- END DUMP LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warn("Program too large (%zu insns), at most %d insns\n",
			load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
		/* Wrong program type? */
		int fd;

		load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
		load_attr.expected_attach_type = 0;
		fd = bpf_load_program_xattr(&load_attr, NULL, 0);
		if (fd >= 0) {
			close(fd);
			ret = -LIBBPF_ERRNO__PROGTYPE;
			goto out;
		}
	}

out:
	free(log_buf);
	return ret;
}

static int libbpf_find_attach_btf_id(const char *name,
				     enum bpf_attach_type attach_type,
				     __u32 attach_prog_fd);

int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
	int err = 0, fd, i, btf_id;

	if (prog->type == BPF_PROG_TYPE_TRACING) {
		btf_id = libbpf_find_attach_btf_id(prog->section_name,
						   prog->expected_attach_type,
						   prog->attach_prog_fd);
		if (btf_id <= 0)
			return btf_id;
		prog->attach_btf_id = btf_id;
	}

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warn("Internal error: can't load program '%s'\n",
				prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warn("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
				prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_ver, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt, license, kern_ver, &fd);
		if (err) {
			pr_warn("Loading the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warn("failed to load program '%s'\n", prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

static bool bpf_program__is_function_storage(const struct bpf_program *prog,
					     const struct bpf_object *obj)
{
	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}

static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (bpf_program__is_function_storage(&obj->programs[i], obj))
			continue;
		obj->programs[i].log_level |= log_level;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
		   const struct bpf_object_open_opts *opts)
{
	const char *obj_name, *kconfig_path;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char tmp_name[64];
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n",
			path ? : "(mem buf)");
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	if (!OPTS_VALID(opts, bpf_object_open_opts))
		return ERR_PTR(-EINVAL);

	obj_name = OPTS_GET(opts, object_name, NULL);
	if (obj_buf) {
		if (!obj_name) {
			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
				 (unsigned long)obj_buf,
				 (unsigned long)obj_buf_sz);
			obj_name = tmp_name;
		}
		path = obj_name;
		pr_debug("loading object '%s' from buffer\n", obj_name);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
	if (IS_ERR(obj))
		return obj;

	obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
	kconfig_path = OPTS_GET(opts, kconfig_path, NULL);
	if (kconfig_path) {
		obj->kconfig_path = strdup(kconfig_path);
		if (!obj->kconfig_path)
			return ERR_PTR(-ENOMEM);
	}

	err = bpf_object__elf_init(obj);
	err = err ? : bpf_object__check_endianness(obj);
	err = err ? : bpf_object__elf_collect(obj);
	err = err ? : bpf_object__collect_externs(obj);
	err = err ? : bpf_object__finalize_btf(obj);
	err = err ? : bpf_object__init_maps(obj, opts);
	err = err ? : bpf_object__init_prog_names(obj);
	err = err ? : bpf_object__collect_reloc(obj);
	if (err)
		goto out;
	bpf_object__elf_finish(obj);

	bpf_object__for_each_program(prog, obj) {
		enum bpf_prog_type prog_type;
		enum bpf_attach_type attach_type;

		err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
					       &attach_type);
		if (err == -ESRCH)
			/* couldn't guess, but user might manually specify */
			continue;
		if (err)
			goto out;

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog, attach_type);
		if (prog_type == BPF_PROG_TYPE_TRACING)
			prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	}

	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

static struct bpf_object *
__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.relaxed_maps = flags & MAPS_RELAX_COMPAT,
	);

	/* param validation */
	if (!attr->file)
		return NULL;

	pr_debug("loading %s\n", attr->file);
	return __bpf_object__open(attr->file, NULL, 0, &opts);
}

struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}

struct bpf_object *bpf_object__open(const char *path)
{
	struct bpf_object_open_attr attr = {
		.file = path,
		.prog_type = BPF_PROG_TYPE_UNSPEC,
	};

	return bpf_object__open_xattr(&attr);
}

struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
{
	if (!path)
		return ERR_PTR(-EINVAL);

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0, opts);
}
4620
4621struct bpf_object *
4622bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
Andrii Nakryiko01af3bf2019-12-13 17:43:32 -08004623 const struct bpf_object_open_opts *opts)
Wang Nan6c956392015-07-01 02:13:54 +00004624{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07004625 if (!obj_buf || obj_buf_sz == 0)
4626 return ERR_PTR(-EINVAL);
Wang Nan6c956392015-07-01 02:13:54 +00004627
Andrii Nakryiko291ee022019-10-15 11:28:46 -07004628 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07004629}
4630
4631struct bpf_object *
4632bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
4633 const char *name)
4634{
Andrii Nakryikoe00aca62019-10-22 10:21:00 -07004635 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07004636 .object_name = name,
4637 /* wrong default, but backwards-compatible */
4638 .relaxed_maps = true,
4639 );
4640
4641 /* returning NULL is wrong, but backwards-compatible */
4642 if (!obj_buf || obj_buf_sz == 0)
4643 return NULL;
4644
4645 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004646}
4647
Wang Nan52d33522015-07-01 02:14:04 +00004648int bpf_object__unload(struct bpf_object *obj)
4649{
4650 size_t i;
4651
4652 if (!obj)
4653 return -EINVAL;
4654
Wang Nan9d759a92015-11-27 08:47:35 +00004655 for (i = 0; i < obj->nr_maps; i++)
4656 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00004657
Wang Nan55cffde2015-07-01 02:14:07 +00004658 for (i = 0; i < obj->nr_programs; i++)
4659 bpf_program__unload(&obj->programs[i]);
4660
Wang Nan52d33522015-07-01 02:14:04 +00004661 return 0;
4662}
4663
Andrii Nakryiko0d13bfc2019-12-13 17:43:25 -08004664static int bpf_object__sanitize_maps(struct bpf_object *obj)
4665{
4666 struct bpf_map *m;
4667
4668 bpf_object__for_each_map(m, obj) {
4669 if (!bpf_map__is_internal(m))
4670 continue;
4671 if (!obj->caps.global_data) {
4672 pr_warn("kernel doesn't support global data\n");
4673 return -ENOTSUP;
4674 }
4675 if (!obj->caps.array_mmap)
4676 m->def.map_flags ^= BPF_F_MMAPABLE;
4677 }
4678
4679 return 0;
4680}

static int bpf_object__resolve_externs(struct bpf_object *obj,
				       const char *config_path)
{
	bool need_config = false;
	struct extern_desc *ext;
	int err, i;
	void *data;

	if (obj->nr_extern == 0)
		return 0;

	data = obj->maps[obj->extern_map_idx].mmaped;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
			void *ext_val = data + ext->data_off;
			__u32 kver = get_kernel_version();

			if (!kver) {
				pr_warn("failed to get kernel version\n");
				return -EINVAL;
			}
			err = set_ext_value_num(ext, ext_val, kver);
			if (err)
				return err;
			pr_debug("extern %s=0x%x\n", ext->name, kver);
		} else if (strncmp(ext->name, "CONFIG_", 7) == 0) {
			need_config = true;
		} else {
			pr_warn("unrecognized extern '%s'\n", ext->name);
			return -EINVAL;
		}
	}
	if (need_config) {
		err = bpf_object__read_kernel_config(obj, config_path, data);
		if (err)
			return -EINVAL;
	}
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (!ext->is_set && !ext->is_weak) {
			pr_warn("extern %s (strong) not resolved\n", ext->name);
			return -ESRCH;
		} else if (!ext->is_set) {
			pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
				 ext->name);
		}
	}

	return 0;
}

int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
	struct bpf_object *obj;
	int err, i;

	if (!attr)
		return -EINVAL;
	obj = attr->obj;
	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warn("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	err = bpf_object__probe_caps(obj);
	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig_path);
	err = err ? : bpf_object__sanitize_and_load_btf(obj);
	err = err ? : bpf_object__sanitize_maps(obj);
	err = err ? : bpf_object__create_maps(obj);
	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
	err = err ? : bpf_object__load_progs(obj, attr->log_level);
	if (err)
		goto out;

	return 0;
out:
	/* unpin any maps that were auto-pinned during load */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object__unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return err;
}

int bpf_object__load(struct bpf_object *obj)
{
	struct bpf_object_load_attr attr = {
		.obj = obj,
	};

	return bpf_object__load_xattr(&attr);
}
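
/* End-to-end usage sketch (illustrative; "prog.o" is an assumption):
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.o");
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;
 *	}
 *	... attach programs, use map fds ...
 *	bpf_object__close(obj);
 */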

static int make_parent_dir(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	char *dname, *dir;
	int err = 0;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (mkdir(dir, 0700) && errno != EEXIST)
		err = -errno;

	free(dname);
	if (err) {
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to mkdir %s: %s\n", path, cp);
	}
	return err;
}

static int check_path(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("failed to statfs %s: %s\n", dir, cp);
		err = -errno;
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warn("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("failed to pin program: %s\n", cp);
		return -errno;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}

int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
				int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	err = unlink(path);
	if (err != 0)
		return -errno;
	pr_debug("unpinned program '%s'\n", path);

	return 0;
}

int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to pin\n",
			prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
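
/* Usage sketch (illustrative; the bpffs path is an assumption):
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 *
 * A single-instance program is pinned at the path itself; a multi-instance
 * program gets a directory with one pin per instance ("<path>/<i>").
 */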

int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to unpin\n",
			prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* no subdirs are used when unpinning a single instance */
		return bpf_program__unpin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin_instance(prog, buf, i);
		if (err)
			return err;
	}

	err = rmdir(path);
	if (err)
		return -errno;

	return 0;
}

int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		} else if (map->pinned) {
			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
				 bpf_map__name(map), map->pin_path);
			return 0;
		}
	} else {
		if (!path) {
			pr_warn("missing a path to pin map '%s' at\n",
				bpf_map__name(map));
			return -EINVAL;
		} else if (map->pinned) {
			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
			return -EEXIST;
		}

		map->pin_path = strdup(path);
		if (!map->pin_path) {
			err = -errno;
			goto out_err;
		}
	}

	err = make_parent_dir(map->pin_path);
	if (err)
		return err;

	err = check_path(map->pin_path);
	if (err)
		return err;

	if (bpf_obj_pin(map->fd, map->pin_path)) {
		err = -errno;
		goto out_err;
	}

	map->pinned = true;
	pr_debug("pinned map '%s'\n", map->pin_path);

	return 0;

out_err:
	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
	pr_warn("failed to pin map: %s\n", cp);
	return err;
}
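
/* Usage sketch (illustrative; map name and path are assumptions). A pin
 * path may be set before load, in which case the map is pinned automatically
 * during bpf_object__load(), or the map can be pinned explicitly afterwards:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	... or, after load:
 *	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 */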

int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		}
		path = map->pin_path;
	} else if (!path) {
		pr_warn("no path to unpin map '%s' from\n",
			bpf_map__name(map));
		return -EINVAL;
	}

	err = check_path(path);
	if (err)
		return err;

	err = unlink(path);
	if (err != 0)
		return -errno;

	map->pinned = false;
	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);

	return 0;
}

int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
{
	char *new = NULL;

	if (path) {
		new = strdup(path);
		if (!new)
			return -errno;
	}

	free(map->pin_path);
	map->pin_path = new;
	return 0;
}

const char *bpf_map__get_pin_path(const struct bpf_map *map)
{
	return map->pin_path;
}

bool bpf_map__is_pinned(const struct bpf_map *map)
{
	return map->pinned;
}

int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0) {
				err = -EINVAL;
				goto err_unpin_maps;
			} else if (len >= PATH_MAX) {
				err = -ENAMETOOLONG;
				goto err_unpin_maps;
			}
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__pin(map, pin_path);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	while ((map = bpf_map__prev(map, obj))) {
		if (!map->pin_path)
			continue;

		bpf_map__unpin(map, NULL);
	}

	return err;
}

int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0)
				return -EINVAL;
			else if (len >= PATH_MAX)
				return -ENAMETOOLONG;
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__unpin(map, pin_path);
		if (err)
			return err;
	}

	return 0;
}

int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}

int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__unpin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}

int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return err;
	}

	return 0;
}
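
/* Usage sketch (illustrative; the directory is an assumption): pin all maps
 * and programs of a loaded object under one bpffs directory:
 *
 *	err = bpf_object__load(obj);
 *	if (!err)
 *		err = bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
 *
 * Maps are pinned first; if pinning the programs then fails, the
 * already-pinned maps are unpinned again, so the operation is
 * all-or-nothing.
 */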

void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];

		if (map->clear_priv)
			map->clear_priv(map, map->priv);
		map->priv = NULL;
		map->clear_priv = NULL;

		if (map->mmaped) {
			munmap(map->mmaped, bpf_map_mmap_sz(map));
			map->mmaped = NULL;
		}

		zfree(&map->name);
		zfree(&map->pin_path);
	}

	zfree(&obj->kconfig_path);
	zfree(&obj->externs);
	obj->nr_extern = 0;

	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* The empty-list case is caught here, so no check is needed on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *bpf_object__name(const struct bpf_object *obj)
{
	return obj ? obj->name : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(const struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct btf *bpf_object__btf(const struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}

int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}

void *bpf_object__priv(const struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}

static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
		    bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warn("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__iter(prog, obj, true);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}

struct bpf_program *
bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
	struct bpf_program *prog = next;

	do {
		prog = __bpf_program__iter(prog, obj, false);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}
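
/* Iteration sketch (illustrative): bpf_program__next() transparently skips
 * the .text function-storage entry, so the typical loop is just:
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj) {
 *		const char *title = bpf_program__title(prog, false);
 *		...
 *	}
 */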

int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(const struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

const char *bpf_program__name(const struct bpf_program *prog)
{
	return prog->name;
}

const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warn("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(const struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

size_t bpf_program__size(const struct bpf_program *prog)
{
	return prog->insns_cnt * sizeof(struct bpf_insn);
}

int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warn("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warn("failed to allocate memory for instance fds\n");
		return -ENOMEM;
	}

	/* fill all fds with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

int bpf_program__nth_fd(const struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return -EINVAL;

	if (n >= prog->instances.nr || n < 0) {
		pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
			n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warn("%dth instance of program '%s' is invalid\n",
			n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}
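
/* Multi-instance sketch (illustrative; the preprocessor body is an
 * assumption). A preprocessor set before load turns one ELF program into
 * several kernel programs; each instance's fd is then fetched by index:
 *
 *	static int prep_fn(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;	// e.g. patch insns based on n
 *		res->new_insn_cnt = insns_cnt;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 4, prep_fn);
 *	// ... after bpf_object__load(obj):
 *	int fd = bpf_program__nth_fd(prog, 2);
 */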

enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
{
	return prog->type;
}

void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

static bool bpf_program__is_type(const struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

#define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
int bpf_program__set_##NAME(struct bpf_program *prog)		\
{								\
	if (!prog)						\
		return -EINVAL;					\
	bpf_program__set_type(prog, TYPE);			\
	return 0;						\
}								\
								\
bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
{								\
	return bpf_program__is_type(prog, TYPE);		\
}								\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
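
/* Each BPF_PROG_TYPE_FNS() expansion above provides a setter/predicate pair,
 * e.g. (illustrative):
 *
 *	if (!bpf_program__is_xdp(prog))
 *		bpf_program__set_xdp(prog);
 */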
Wang Nan5f44e4c82016-07-13 10:44:01 +00005614
Andrii Nakryikof1eead92019-10-20 20:38:57 -07005615enum bpf_attach_type
5616bpf_program__get_expected_attach_type(struct bpf_program *prog)
5617{
5618 return prog->expected_attach_type;
5619}
5620
John Fastabend16962b22018-04-23 14:30:38 -07005621void bpf_program__set_expected_attach_type(struct bpf_program *prog,
5622 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005623{
5624 prog->expected_attach_type = type;
5625}
5626
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07005627#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
5628 { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005629
Andrey Ignatov956b6202018-09-26 15:24:53 -07005630/* Programs that can NOT be attached. */
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07005631#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005632
Andrey Ignatov956b6202018-09-26 15:24:53 -07005633/* Programs that can be attached. */
5634#define BPF_APROG_SEC(string, ptype, atype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07005635 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
Andrey Ignatov81efee72018-04-17 10:28:45 -07005636
Andrey Ignatov956b6202018-09-26 15:24:53 -07005637/* Programs that must specify expected attach type at load time. */
5638#define BPF_EAPROG_SEC(string, ptype, eatype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07005639 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
5640
5641/* Programs that use BTF to identify attach point */
Alexei Starovoitov12a86542019-10-30 15:32:12 -07005642#define BPF_PROG_BTF(string, ptype, eatype) \
5643 BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
Andrey Ignatov956b6202018-09-26 15:24:53 -07005644
5645/* Programs that can be attached but attach type can't be identified by section
5646 * name. Kept for backward compatibility.
5647 */
5648#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07005649
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08005650#define SEC_DEF(sec_pfx, ptype, ...) { \
5651 .sec = sec_pfx, \
5652 .len = sizeof(sec_pfx) - 1, \
5653 .prog_type = BPF_PROG_TYPE_##ptype, \
5654 __VA_ARGS__ \
5655}
5656
5657struct bpf_sec_def;
5658
5659typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
5660 struct bpf_program *prog);
5661
5662static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
5663 struct bpf_program *prog);
5664static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
5665 struct bpf_program *prog);
5666static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
5667 struct bpf_program *prog);
5668static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
5669 struct bpf_program *prog);
5670
5671struct bpf_sec_def {
Roman Gushchin583c9002017-12-13 15:18:51 +00005672 const char *sec;
5673 size_t len;
5674 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005675 enum bpf_attach_type expected_attach_type;
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07005676 bool is_attachable;
5677 bool is_attach_btf;
Andrey Ignatov956b6202018-09-26 15:24:53 -07005678 enum bpf_attach_type attach_type;
Andrii Nakryikod7a18ea2019-12-13 17:43:26 -08005679 attach_fn_t attach_fn;
5680};
5681
static const struct bpf_sec_def section_defs[] = {
        BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
        BPF_PROG_SEC("sk_reuseport",            BPF_PROG_TYPE_SK_REUSEPORT),
        SEC_DEF("kprobe/", KPROBE,
                .attach_fn = attach_kprobe),
        BPF_PROG_SEC("uprobe/",                 BPF_PROG_TYPE_KPROBE),
        SEC_DEF("kretprobe/", KPROBE,
                .attach_fn = attach_kprobe),
        BPF_PROG_SEC("uretprobe/",              BPF_PROG_TYPE_KPROBE),
        BPF_PROG_SEC("classifier",              BPF_PROG_TYPE_SCHED_CLS),
        BPF_PROG_SEC("action",                  BPF_PROG_TYPE_SCHED_ACT),
        SEC_DEF("tracepoint/", TRACEPOINT,
                .attach_fn = attach_tp),
        SEC_DEF("tp/", TRACEPOINT,
                .attach_fn = attach_tp),
        SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
                .attach_fn = attach_raw_tp),
        SEC_DEF("raw_tp/", RAW_TRACEPOINT,
                .attach_fn = attach_raw_tp),
        SEC_DEF("tp_btf/", TRACING,
                .expected_attach_type = BPF_TRACE_RAW_TP,
                .is_attach_btf = true,
                .attach_fn = attach_trace),
        SEC_DEF("fentry/", TRACING,
                .expected_attach_type = BPF_TRACE_FENTRY,
                .is_attach_btf = true,
                .attach_fn = attach_trace),
        SEC_DEF("fexit/", TRACING,
                .expected_attach_type = BPF_TRACE_FEXIT,
                .is_attach_btf = true,
                .attach_fn = attach_trace),
        BPF_PROG_SEC("xdp",                     BPF_PROG_TYPE_XDP),
        BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
        BPF_PROG_SEC("lwt_in",                  BPF_PROG_TYPE_LWT_IN),
        BPF_PROG_SEC("lwt_out",                 BPF_PROG_TYPE_LWT_OUT),
        BPF_PROG_SEC("lwt_xmit",                BPF_PROG_TYPE_LWT_XMIT),
        BPF_PROG_SEC("lwt_seg6local",           BPF_PROG_TYPE_LWT_SEG6LOCAL),
        BPF_APROG_SEC("cgroup_skb/ingress",     BPF_PROG_TYPE_CGROUP_SKB,
                                                BPF_CGROUP_INET_INGRESS),
        BPF_APROG_SEC("cgroup_skb/egress",      BPF_PROG_TYPE_CGROUP_SKB,
                                                BPF_CGROUP_INET_EGRESS),
        BPF_APROG_COMPAT("cgroup/skb",          BPF_PROG_TYPE_CGROUP_SKB),
        BPF_APROG_SEC("cgroup/sock",            BPF_PROG_TYPE_CGROUP_SOCK,
                                                BPF_CGROUP_INET_SOCK_CREATE),
        BPF_EAPROG_SEC("cgroup/post_bind4",     BPF_PROG_TYPE_CGROUP_SOCK,
                                                BPF_CGROUP_INET4_POST_BIND),
        BPF_EAPROG_SEC("cgroup/post_bind6",     BPF_PROG_TYPE_CGROUP_SOCK,
                                                BPF_CGROUP_INET6_POST_BIND),
        BPF_APROG_SEC("cgroup/dev",             BPF_PROG_TYPE_CGROUP_DEVICE,
                                                BPF_CGROUP_DEVICE),
        BPF_APROG_SEC("sockops",                BPF_PROG_TYPE_SOCK_OPS,
                                                BPF_CGROUP_SOCK_OPS),
        BPF_APROG_SEC("sk_skb/stream_parser",   BPF_PROG_TYPE_SK_SKB,
                                                BPF_SK_SKB_STREAM_PARSER),
        BPF_APROG_SEC("sk_skb/stream_verdict",  BPF_PROG_TYPE_SK_SKB,
                                                BPF_SK_SKB_STREAM_VERDICT),
        BPF_APROG_COMPAT("sk_skb",              BPF_PROG_TYPE_SK_SKB),
        BPF_APROG_SEC("sk_msg",                 BPF_PROG_TYPE_SK_MSG,
                                                BPF_SK_MSG_VERDICT),
        BPF_APROG_SEC("lirc_mode2",             BPF_PROG_TYPE_LIRC_MODE2,
                                                BPF_LIRC_MODE2),
        BPF_APROG_SEC("flow_dissector",         BPF_PROG_TYPE_FLOW_DISSECTOR,
                                                BPF_FLOW_DISSECTOR),
        BPF_EAPROG_SEC("cgroup/bind4",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_INET4_BIND),
        BPF_EAPROG_SEC("cgroup/bind6",          BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_INET6_BIND),
        BPF_EAPROG_SEC("cgroup/connect4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_INET4_CONNECT),
        BPF_EAPROG_SEC("cgroup/connect6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_INET6_CONNECT),
        BPF_EAPROG_SEC("cgroup/sendmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_UDP4_SENDMSG),
        BPF_EAPROG_SEC("cgroup/sendmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_UDP6_SENDMSG),
        BPF_EAPROG_SEC("cgroup/recvmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_UDP4_RECVMSG),
        BPF_EAPROG_SEC("cgroup/recvmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_UDP6_RECVMSG),
        BPF_EAPROG_SEC("cgroup/sysctl",         BPF_PROG_TYPE_CGROUP_SYSCTL,
                                                BPF_CGROUP_SYSCTL),
        BPF_EAPROG_SEC("cgroup/getsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
                                                BPF_CGROUP_GETSOCKOPT),
        BPF_EAPROG_SEC("cgroup/setsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
                                                BPF_CGROUP_SETSOCKOPT),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
#undef SEC_DEF

#define MAX_TYPE_NAME_SIZE 32

static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
        int i, n = ARRAY_SIZE(section_defs);

        for (i = 0; i < n; i++) {
                if (strncmp(sec_name,
                            section_defs[i].sec, section_defs[i].len))
                        continue;
                return &section_defs[i];
        }
        return NULL;
}

static char *libbpf_get_type_names(bool attach_type)
{
        int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
        char *buf;

        buf = malloc(len);
        if (!buf)
                return NULL;

        buf[0] = '\0';
        /* Fill buf with all available names */
        for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
                if (attach_type && !section_defs[i].is_attachable)
                        continue;

                if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
                        free(buf);
                        return NULL;
                }
                strcat(buf, " ");
                strcat(buf, section_defs[i].sec);
        }

        return buf;
}

int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
                             enum bpf_attach_type *expected_attach_type)
{
        const struct bpf_sec_def *sec_def;
        char *type_names;

        if (!name)
                return -EINVAL;

        sec_def = find_sec_def(name);
        if (sec_def) {
                *prog_type = sec_def->prog_type;
                *expected_attach_type = sec_def->expected_attach_type;
                return 0;
        }

        pr_warn("failed to guess program type from ELF section '%s'\n", name);
        type_names = libbpf_get_type_names(false);
        if (type_names != NULL) {
                pr_debug("supported section(type) names are:%s\n", type_names);
                free(type_names);
        }

        return -ESRCH;
}
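
/* Example usage (an illustrative sketch; the section name below is
 * hypothetical and prog is assumed to be a valid bpf_program pointer):
 *
 *      enum bpf_prog_type prog_type;
 *      enum bpf_attach_type attach_type;
 *
 *      if (!libbpf_prog_type_by_name("tracepoint/sched/sched_switch",
 *                                    &prog_type, &attach_type))
 *              bpf_program__set_type(prog, prog_type);
 */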

#define BTF_PREFIX "btf_trace_"
int libbpf_find_vmlinux_btf_id(const char *name,
                               enum bpf_attach_type attach_type)
{
        struct btf *btf = bpf_core_find_kernel_btf();
        char raw_tp_btf[128] = BTF_PREFIX;
        char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
        const char *btf_name;
        int err = -EINVAL;
        __u32 kind;

        if (IS_ERR(btf)) {
                pr_warn("vmlinux BTF is not found\n");
                return -EINVAL;
        }

        if (attach_type == BPF_TRACE_RAW_TP) {
                /* prepend "btf_trace_" prefix per kernel convention */
                strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
                btf_name = raw_tp_btf;
                kind = BTF_KIND_TYPEDEF;
        } else {
                btf_name = name;
                kind = BTF_KIND_FUNC;
        }
        err = btf__find_by_name_kind(btf, btf_name, kind);
        btf__free(btf);
        return err;
}
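
/* For example, with attach_type == BPF_TRACE_RAW_TP and name
 * "sched_switch", the lookup above is for a BTF_KIND_TYPEDEF named
 * "btf_trace_sched_switch"; for BPF_TRACE_FENTRY/BPF_TRACE_FEXIT the
 * name is used as-is to look up a BTF_KIND_FUNC.
 */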

static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info *info;
        struct btf *btf = NULL;
        int err = -EINVAL;

        info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
        if (IS_ERR_OR_NULL(info_linear)) {
                pr_warn("failed get_prog_info_linear for FD %d\n",
                        attach_prog_fd);
                return -EINVAL;
        }
        info = &info_linear->info;
        if (!info->btf_id) {
                pr_warn("The target program doesn't have BTF\n");
                goto out;
        }
        if (btf__get_from_id(info->btf_id, &btf)) {
                pr_warn("Failed to get BTF of the program\n");
                goto out;
        }
        err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
        btf__free(btf);
        if (err <= 0) {
                pr_warn("%s is not found in prog's BTF\n", name);
                goto out;
        }
out:
        free(info_linear);
        return err;
}

static int libbpf_find_attach_btf_id(const char *name,
                                     enum bpf_attach_type attach_type,
                                     __u32 attach_prog_fd)
{
        int i, err;

        if (!name)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
                if (!section_defs[i].is_attach_btf)
                        continue;
                if (strncmp(name, section_defs[i].sec, section_defs[i].len))
                        continue;
                if (attach_prog_fd)
                        err = libbpf_find_prog_btf_id(name + section_defs[i].len,
                                                      attach_prog_fd);
                else
                        err = libbpf_find_vmlinux_btf_id(name + section_defs[i].len,
                                                         attach_type);
                if (err <= 0)
                        pr_warn("%s is not found in vmlinux BTF\n", name);
                return err;
        }
        pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
        return -ESRCH;
}

int libbpf_attach_type_by_name(const char *name,
                               enum bpf_attach_type *attach_type)
{
        char *type_names;
        int i;

        if (!name)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
                if (strncmp(name, section_defs[i].sec, section_defs[i].len))
                        continue;
                if (!section_defs[i].is_attachable)
                        return -EINVAL;
                *attach_type = section_defs[i].attach_type;
                return 0;
        }
        pr_warn("failed to guess attach type based on ELF section name '%s'\n", name);
        type_names = libbpf_get_type_names(true);
        if (type_names != NULL) {
                pr_info("attachable section(type) names are:%s\n", type_names);
                free(type_names);
        }

        return -EINVAL;
}
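
/* Example usage (an illustrative sketch; prog_fd and cgroup_fd are
 * assumed to be valid FDs obtained elsewhere). "cgroup/bind4" resolves
 * to BPF_CGROUP_INET4_BIND per section_defs above:
 *
 *      enum bpf_attach_type attach_type;
 *
 *      if (!libbpf_attach_type_by_name("cgroup/bind4", &attach_type))
 *              err = bpf_prog_attach(prog_fd, cgroup_fd, attach_type, 0);
 */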

int bpf_map__fd(const struct bpf_map *map)
{
        return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
        return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(const struct bpf_map *map)
{
        return map ? map->name : NULL;
}

__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
        return map ? map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
        return map ? map->btf_value_type_id : 0;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
                      bpf_map_clear_priv_t clear_priv)
{
        if (!map)
                return -EINVAL;

        if (map->priv) {
                if (map->clear_priv)
                        map->clear_priv(map, map->priv);
        }

        map->priv = priv;
        map->clear_priv = clear_priv;
        return 0;
}

void *bpf_map__priv(const struct bpf_map *map)
{
        return map ? map->priv : ERR_PTR(-EINVAL);
}

bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
        return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

bool bpf_map__is_internal(const struct bpf_map *map)
{
        return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
        map->map_ifindex = ifindex;
}

int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
        if (!bpf_map_type__is_map_in_map(map->def.type)) {
                pr_warn("error: unsupported map type\n");
                return -EINVAL;
        }
        if (map->inner_map_fd != -1) {
                pr_warn("error: inner_map_fd already specified\n");
                return -EINVAL;
        }
        map->inner_map_fd = fd;
        return 0;
}

static struct bpf_map *
__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
        ssize_t idx;
        struct bpf_map *s, *e;

        if (!obj || !obj->maps)
                return NULL;

        s = obj->maps;
        e = obj->maps + obj->nr_maps;

        if ((m < s) || (m >= e)) {
                pr_warn("error in %s: map handler doesn't belong to object\n",
                        __func__);
                return NULL;
        }

        idx = (m - obj->maps) + i;
        if (idx >= obj->nr_maps || idx < 0)
                return NULL;
        return &obj->maps[idx];
}

struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
        if (prev == NULL)
                return obj->maps;

        return __bpf_map__iter(prev, obj, 1);
}

struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
        if (next == NULL) {
                if (!obj->nr_maps)
                        return NULL;
                return obj->maps + obj->nr_maps - 1;
        }

        return __bpf_map__iter(next, obj, -1);
}

struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
{
        struct bpf_map *pos;

        bpf_object__for_each_map(pos, obj) {
                if (pos->name && !strcmp(pos->name, name))
                        return pos;
        }
        return NULL;
}

int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
        return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}

struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
        return ERR_PTR(-ENOTSUP);
}

long libbpf_get_error(const void *ptr)
{
        return PTR_ERR_OR_ZERO(ptr);
}

int bpf_prog_load(const char *file, enum bpf_prog_type type,
                  struct bpf_object **pobj, int *prog_fd)
{
        struct bpf_prog_load_attr attr;

        memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
        attr.file = file;
        attr.prog_type = type;
        attr.expected_attach_type = 0;

        return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}

int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
                        struct bpf_object **pobj, int *prog_fd)
{
        struct bpf_object_open_attr open_attr = {};
        struct bpf_program *prog, *first_prog = NULL;
        struct bpf_object *obj;
        struct bpf_map *map;
        int err;

        if (!attr)
                return -EINVAL;
        if (!attr->file)
                return -EINVAL;

        open_attr.file = attr->file;
        open_attr.prog_type = attr->prog_type;

        obj = bpf_object__open_xattr(&open_attr);
        if (IS_ERR_OR_NULL(obj))
                return -ENOENT;

        bpf_object__for_each_program(prog, obj) {
                enum bpf_attach_type attach_type = attr->expected_attach_type;
                /*
                 * to preserve backwards compatibility, bpf_prog_load treats
                 * attr->prog_type, if specified, as an override to whatever
                 * bpf_object__open guessed
                 */
                if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
                        bpf_program__set_type(prog, attr->prog_type);
                        bpf_program__set_expected_attach_type(prog,
                                                              attach_type);
                }
                if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
                        /*
                         * we haven't guessed from section name and user
                         * didn't provide a fallback type, too bad...
                         */
                        bpf_object__close(obj);
                        return -EINVAL;
                }

                prog->prog_ifindex = attr->ifindex;
                prog->log_level = attr->log_level;
                prog->prog_flags = attr->prog_flags;
                if (!first_prog)
                        first_prog = prog;
        }

        bpf_object__for_each_map(map, obj) {
                if (!bpf_map__is_offload_neutral(map))
                        map->map_ifindex = attr->ifindex;
        }

        if (!first_prog) {
                pr_warn("object file doesn't contain bpf program\n");
                bpf_object__close(obj);
                return -ENOENT;
        }

        err = bpf_object__load(obj);
        if (err) {
                bpf_object__close(obj);
                return -EINVAL;
        }

        *pobj = obj;
        *prog_fd = bpf_program__fd(first_prog);
        return 0;
}
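
/* Example usage (an illustrative sketch; "xdp_prog.o" is a hypothetical
 * object file):
 *
 *      struct bpf_object *obj;
 *      int prog_fd;
 *
 *      if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *              return -1;
 *      ... use prog_fd, then bpf_object__close(obj) when done ...
 */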

struct bpf_link {
        int (*destroy)(struct bpf_link *link);
};

int bpf_link__destroy(struct bpf_link *link)
{
        int err;

        if (!link)
                return 0;

        err = link->destroy(link);
        free(link);

        return err;
}

struct bpf_link_fd {
        struct bpf_link link; /* has to be at the top of struct */
        int fd; /* hook FD */
};

static int bpf_link__destroy_perf_event(struct bpf_link *link)
{
        struct bpf_link_fd *l = (void *)link;
        int err;

        err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
        if (err)
                err = -errno;

        close(l->fd);
        return err;
}

struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
                                                int pfd)
{
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link_fd *link;
        int prog_fd, err;

        if (pfd < 0) {
                pr_warn("program '%s': invalid perf event FD %d\n",
                        bpf_program__title(prog, false), pfd);
                return ERR_PTR(-EINVAL);
        }
        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
                pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
                        bpf_program__title(prog, false));
                return ERR_PTR(-EINVAL);
        }

        link = malloc(sizeof(*link));
        if (!link)
                return ERR_PTR(-ENOMEM);
        link->link.destroy = &bpf_link__destroy_perf_event;
        link->fd = pfd;

        if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
                err = -errno;
                free(link);
                pr_warn("program '%s': failed to attach to pfd %d: %s\n",
                        bpf_program__title(prog, false), pfd,
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return ERR_PTR(err);
        }
        if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
                err = -errno;
                free(link);
                pr_warn("program '%s': failed to enable pfd %d: %s\n",
                        bpf_program__title(prog, false), pfd,
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return ERR_PTR(err);
        }
        return (struct bpf_link *)link;
}
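
/* Example usage (an illustrative sketch; pfd is assumed to be a valid
 * perf_event_open() FD). On success the returned link owns pfd, so the
 * caller should not close it directly:
 *
 *      struct bpf_link *link = bpf_program__attach_perf_event(prog, pfd);
 *
 *      if (IS_ERR(link))
 *              return PTR_ERR(link);
 *      ...
 *      bpf_link__destroy(link); // disables the event and closes pfd
 */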

/*
 * This function is expected to parse an integer in the range of
 * [0, 2^31-1] from the given file using scanf format string fmt. If the
 * actual parsed value is negative, the result might be indistinguishable
 * from an error.
 */
static int parse_uint_from_file(const char *file, const char *fmt)
{
        char buf[STRERR_BUFSIZE];
        int err, ret;
        FILE *f;

        f = fopen(file, "r");
        if (!f) {
                err = -errno;
                pr_debug("failed to open '%s': %s\n", file,
                         libbpf_strerror_r(err, buf, sizeof(buf)));
                return err;
        }
        err = fscanf(f, fmt, &ret);
        if (err != 1) {
                err = err == EOF ? -EIO : -errno;
                pr_debug("failed to parse '%s': %s\n", file,
                         libbpf_strerror_r(err, buf, sizeof(buf)));
                fclose(f);
                return err;
        }
        fclose(f);
        return ret;
}

static int determine_kprobe_perf_type(void)
{
        const char *file = "/sys/bus/event_source/devices/kprobe/type";

        return parse_uint_from_file(file, "%d\n");
}

static int determine_uprobe_perf_type(void)
{
        const char *file = "/sys/bus/event_source/devices/uprobe/type";

        return parse_uint_from_file(file, "%d\n");
}

static int determine_kprobe_retprobe_bit(void)
{
        const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";

        return parse_uint_from_file(file, "config:%d\n");
}

static int determine_uprobe_retprobe_bit(void)
{
        const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";

        return parse_uint_from_file(file, "config:%d\n");
}
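
/* These helpers parse the dynamic PMU descriptors exposed in sysfs.
 * Illustratively (the exact values are system-dependent):
 *
 *      $ cat /sys/bus/event_source/devices/kprobe/type
 *      6
 *      $ cat /sys/bus/event_source/devices/kprobe/format/retprobe
 *      config:0
 *
 * would yield perf type 6 for kprobes and retprobe bit 0.
 */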

static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
                                 uint64_t offset, int pid)
{
        struct perf_event_attr attr = {};
        char errmsg[STRERR_BUFSIZE];
        int type, pfd, err;

        type = uprobe ? determine_uprobe_perf_type()
                      : determine_kprobe_perf_type();
        if (type < 0) {
                pr_warn("failed to determine %s perf type: %s\n",
                        uprobe ? "uprobe" : "kprobe",
                        libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
                return type;
        }
        if (retprobe) {
                int bit = uprobe ? determine_uprobe_retprobe_bit()
                                 : determine_kprobe_retprobe_bit();

                if (bit < 0) {
                        pr_warn("failed to determine %s retprobe bit: %s\n",
                                uprobe ? "uprobe" : "kprobe",
                                libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
                        return bit;
                }
                attr.config |= 1 << bit;
        }
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
        attr.config2 = offset;           /* kprobe_addr or probe_offset */

        /* pid filter is meaningful only for uprobes */
        pfd = syscall(__NR_perf_event_open, &attr,
                      pid < 0 ? -1 : pid /* pid */,
                      pid == -1 ? 0 : -1 /* cpu */,
                      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
        if (pfd < 0) {
                err = -errno;
                pr_warn("%s perf_event_open() failed: %s\n",
                        uprobe ? "uprobe" : "kprobe",
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return err;
        }
        return pfd;
}

struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
                                            bool retprobe,
                                            const char *func_name)
{
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link *link;
        int pfd, err;

        pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
                                    0 /* offset */, -1 /* pid */);
        if (pfd < 0) {
                pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
                        bpf_program__title(prog, false),
                        retprobe ? "kretprobe" : "kprobe", func_name,
                        libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
                return ERR_PTR(pfd);
        }
        link = bpf_program__attach_perf_event(prog, pfd);
        if (IS_ERR(link)) {
                close(pfd);
                err = PTR_ERR(link);
                pr_warn("program '%s': failed to attach to %s '%s': %s\n",
                        bpf_program__title(prog, false),
                        retprobe ? "kretprobe" : "kprobe", func_name,
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return link;
        }
        return link;
}
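
/* Example usage (an illustrative sketch; the traced kernel function is
 * hypothetical, and false here requests a plain kprobe rather than a
 * kretprobe):
 *
 *      struct bpf_link *link;
 *
 *      link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *      if (IS_ERR(link))
 *              return PTR_ERR(link);
 */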

static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
                                      struct bpf_program *prog)
{
        const char *func_name;
        bool retprobe;

        func_name = bpf_program__title(prog, false) + sec->len;
        retprobe = strcmp(sec->sec, "kretprobe/") == 0;

        return bpf_program__attach_kprobe(prog, retprobe, func_name);
}

struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
                                            bool retprobe, pid_t pid,
                                            const char *binary_path,
                                            size_t func_offset)
{
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link *link;
        int pfd, err;

        pfd = perf_event_open_probe(true /* uprobe */, retprobe,
                                    binary_path, func_offset, pid);
        if (pfd < 0) {
                pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
                        bpf_program__title(prog, false),
                        retprobe ? "uretprobe" : "uprobe",
                        binary_path, func_offset,
                        libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
                return ERR_PTR(pfd);
        }
        link = bpf_program__attach_perf_event(prog, pfd);
        if (IS_ERR(link)) {
                close(pfd);
                err = PTR_ERR(link);
                pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
                        bpf_program__title(prog, false),
                        retprobe ? "uretprobe" : "uprobe",
                        binary_path, func_offset,
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return link;
        }
        return link;
}
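
/* Example usage (an illustrative sketch; the binary path and function
 * offset are hypothetical; pid == -1 means "trace all processes"):
 *
 *      link = bpf_program__attach_uprobe(prog, false, -1,
 *                                        "/usr/lib/libc.so.6", 0x1234);
 *      if (IS_ERR(link))
 *              return PTR_ERR(link);
 */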

static int determine_tracepoint_id(const char *tp_category,
                                   const char *tp_name)
{
        char file[PATH_MAX];
        int ret;

        ret = snprintf(file, sizeof(file),
                       "/sys/kernel/debug/tracing/events/%s/%s/id",
                       tp_category, tp_name);
        if (ret < 0)
                return -errno;
        if (ret >= sizeof(file)) {
                pr_debug("tracepoint %s/%s path is too long\n",
                         tp_category, tp_name);
                return -E2BIG;
        }
        return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
                                      const char *tp_name)
{
        struct perf_event_attr attr = {};
        char errmsg[STRERR_BUFSIZE];
        int tp_id, pfd, err;

        tp_id = determine_tracepoint_id(tp_category, tp_name);
        if (tp_id < 0) {
                pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
                        tp_category, tp_name,
                        libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
                return tp_id;
        }

        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = tp_id;

        pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
                      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
        if (pfd < 0) {
                err = -errno;
                pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
                        tp_category, tp_name,
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return err;
        }
        return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
                                                const char *tp_category,
                                                const char *tp_name)
{
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link *link;
        int pfd, err;

        pfd = perf_event_open_tracepoint(tp_category, tp_name);
        if (pfd < 0) {
                pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
                        bpf_program__title(prog, false),
                        tp_category, tp_name,
                        libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
                return ERR_PTR(pfd);
        }
        link = bpf_program__attach_perf_event(prog, pfd);
        if (IS_ERR(link)) {
                close(pfd);
                err = PTR_ERR(link);
                pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
                        bpf_program__title(prog, false),
                        tp_category, tp_name,
                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
                return link;
        }
        return link;
}
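
/* Example usage (an illustrative sketch, using the sched/sched_switch
 * tracepoint):
 *
 *      link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 *      if (IS_ERR(link))
 *              return PTR_ERR(link);
 */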

static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
                                  struct bpf_program *prog)
{
        char *sec_name, *tp_cat, *tp_name;
        struct bpf_link *link;

        sec_name = strdup(bpf_program__title(prog, false));
        if (!sec_name)
                return ERR_PTR(-ENOMEM);

        /* extract "tp/<category>/<name>" */
        tp_cat = sec_name + sec->len;
        tp_name = strchr(tp_cat, '/');
        if (!tp_name) {
                link = ERR_PTR(-EINVAL);
                goto out;
        }
        *tp_name = '\0';
        tp_name++;

        link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
out:
        free(sec_name);
        return link;
}

static int bpf_link__destroy_fd(struct bpf_link *link)
{
        struct bpf_link_fd *l = (void *)link;

        return close(l->fd);
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
                                                    const char *tp_name)
{
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link_fd *link;
        int prog_fd, pfd;

        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
                pr_warn("program '%s': can't attach before loaded\n",
                        bpf_program__title(prog, false));
                return ERR_PTR(-EINVAL);
        }

        link = malloc(sizeof(*link));
        if (!link)
                return ERR_PTR(-ENOMEM);
        link->link.destroy = &bpf_link__destroy_fd;

        pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
        if (pfd < 0) {
                pfd = -errno;
                free(link);
                pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
                        bpf_program__title(prog, false), tp_name,
                        libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
                return ERR_PTR(pfd);
        }
        link->fd = pfd;
        return (struct bpf_link *)link;
}

static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
                                      struct bpf_program *prog)
{
        const char *tp_name = bpf_program__title(prog, false) + sec->len;

        return bpf_program__attach_raw_tracepoint(prog, tp_name);
}

struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link_fd *link;
        int prog_fd, pfd;

        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
                pr_warn("program '%s': can't attach before loaded\n",
                        bpf_program__title(prog, false));
                return ERR_PTR(-EINVAL);
        }

        link = malloc(sizeof(*link));
        if (!link)
                return ERR_PTR(-ENOMEM);
        link->link.destroy = &bpf_link__destroy_fd;

        pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
        if (pfd < 0) {
                pfd = -errno;
                free(link);
                pr_warn("program '%s': failed to attach to trace: %s\n",
                        bpf_program__title(prog, false),
                        libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
                return ERR_PTR(pfd);
        }
        link->fd = pfd;
        return (struct bpf_link *)link;
}

static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
                                     struct bpf_program *prog)
{
        return bpf_program__attach_trace(prog);
}

struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
        const struct bpf_sec_def *sec_def;

        sec_def = find_sec_def(bpf_program__title(prog, false));
        if (!sec_def || !sec_def->attach_fn)
                return ERR_PTR(-ESRCH);

        return sec_def->attach_fn(sec_def, prog);
}
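
/* Example usage (an illustrative sketch): for a program defined with
 * SEC("kprobe/do_sys_open"), a single generic call selects attach_kprobe()
 * via section_defs above:
 *
 *      struct bpf_link *link = bpf_program__attach(prog);
 *
 *      if (IS_ERR(link))
 *              return PTR_ERR(link);
 */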

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
                           void **copy_mem, size_t *copy_size,
                           bpf_perf_event_print_t fn, void *private_data)
{
        struct perf_event_mmap_page *header = mmap_mem;
        __u64 data_head = ring_buffer_read_head(header);
        __u64 data_tail = header->data_tail;
        void *base = ((__u8 *)header) + page_size;
        int ret = LIBBPF_PERF_EVENT_CONT;
        struct perf_event_header *ehdr;
        size_t ehdr_size;

        while (data_head != data_tail) {
                ehdr = base + (data_tail & (mmap_size - 1));
                ehdr_size = ehdr->size;

                if (((void *)ehdr) + ehdr_size > base + mmap_size) {
                        void *copy_start = ehdr;
                        size_t len_first = base + mmap_size - copy_start;
                        size_t len_secnd = ehdr_size - len_first;

                        if (*copy_size < ehdr_size) {
                                free(*copy_mem);
                                *copy_mem = malloc(ehdr_size);
                                if (!*copy_mem) {
                                        *copy_size = 0;
                                        ret = LIBBPF_PERF_EVENT_ERROR;
                                        break;
                                }
                                *copy_size = ehdr_size;
                        }

                        memcpy(*copy_mem, copy_start, len_first);
                        memcpy(*copy_mem + len_first, base, len_secnd);
                        ehdr = *copy_mem;
                }

                ret = fn(ehdr, private_data);
                data_tail += ehdr_size;
                if (ret != LIBBPF_PERF_EVENT_CONT)
                        break;
        }

        ring_buffer_write_tail(header, data_tail);
        return ret;
}
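
/* A note on the wrap-around handling above: the ring is mmap_size bytes
 * (a power of two), so a record that starts near the end may wrap to the
 * beginning. E.g. with mmap_size == 4096, a 24-byte record at offset 4088
 * occupies bytes [4088, 4096) plus [0, 16); the two memcpy() calls stitch
 * it back together into *copy_mem before invoking the callback.
 */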

struct perf_buffer;

struct perf_buffer_params {
        struct perf_event_attr *attr;
        /* if event_cb is specified, it takes precedence */
        perf_buffer_event_fn event_cb;
        /* sample_cb and lost_cb are higher-level common-case callbacks */
        perf_buffer_sample_fn sample_cb;
        perf_buffer_lost_fn lost_cb;
        void *ctx;
        int cpu_cnt;
        int *cpus;
        int *map_keys;
};

struct perf_cpu_buf {
        struct perf_buffer *pb;
        void *base; /* mmap()'ed memory */
        void *buf; /* for reconstructing segmented data */
        size_t buf_size;
        int fd;
        int cpu;
        int map_key;
};

struct perf_buffer {
        perf_buffer_event_fn event_cb;
        perf_buffer_sample_fn sample_cb;
        perf_buffer_lost_fn lost_cb;
        void *ctx; /* passed into callbacks */

        size_t page_size;
        size_t mmap_size;
        struct perf_cpu_buf **cpu_bufs;
        struct epoll_event *events;
        int cpu_cnt; /* number of allocated CPU buffers */
        int epoll_fd; /* epoll instance FD */
        int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
                                      struct perf_cpu_buf *cpu_buf)
{
        if (!cpu_buf)
                return;
        if (cpu_buf->base &&
            munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
                pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
        if (cpu_buf->fd >= 0) {
                ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
                close(cpu_buf->fd);
        }
        free(cpu_buf->buf);
        free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
        int i;

        if (!pb)
                return;
        if (pb->cpu_bufs) {
                for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
                        struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

                        bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
                        perf_buffer__free_cpu_buf(pb, cpu_buf);
                }
                free(pb->cpu_bufs);
        }
        if (pb->epoll_fd >= 0)
                close(pb->epoll_fd);
        free(pb->events);
        free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
                          int cpu, int map_key)
{
        struct perf_cpu_buf *cpu_buf;
        char msg[STRERR_BUFSIZE];
        int err;

        cpu_buf = calloc(1, sizeof(*cpu_buf));
        if (!cpu_buf)
                return ERR_PTR(-ENOMEM);

        cpu_buf->pb = pb;
        cpu_buf->cpu = cpu;
        cpu_buf->map_key = map_key;

        cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
                              -1, PERF_FLAG_FD_CLOEXEC);
        if (cpu_buf->fd < 0) {
                err = -errno;
                pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
                        cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
                goto error;
        }

        cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
                             PROT_READ | PROT_WRITE, MAP_SHARED,
                             cpu_buf->fd, 0);
        if (cpu_buf->base == MAP_FAILED) {
                cpu_buf->base = NULL;
                err = -errno;
                pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
                        cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
                goto error;
        }

        if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
                err = -errno;
                pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
                        cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
                goto error;
        }

        return cpu_buf;

error:
        perf_buffer__free_cpu_buf(pb, cpu_buf);
        return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
                                              struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
                                     const struct perf_buffer_opts *opts)
{
        struct perf_buffer_params p = {};
        struct perf_event_attr attr = { 0, };

        attr.config = PERF_COUNT_SW_BPF_OUTPUT;
        attr.type = PERF_TYPE_SOFTWARE;
        attr.sample_type = PERF_SAMPLE_RAW;
        attr.sample_period = 1;
        attr.wakeup_events = 1;

        p.attr = &attr;
        p.sample_cb = opts ? opts->sample_cb : NULL;
        p.lost_cb = opts ? opts->lost_cb : NULL;
        p.ctx = opts ? opts->ctx : NULL;

        return __perf_buffer__new(map_fd, page_cnt, &p);
}

struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
                     const struct perf_buffer_raw_opts *opts)
{
        struct perf_buffer_params p = {};

        p.attr = opts->attr;
        p.event_cb = opts->event_cb;
        p.ctx = opts->ctx;
        p.cpu_cnt = opts->cpu_cnt;
        p.cpus = opts->cpus;
        p.map_keys = opts->map_keys;

        return __perf_buffer__new(map_fd, page_cnt, &p);
}
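
/* Example usage (an illustrative sketch; handle_sample() is a hypothetical
 * callback, map a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, and 8 is the per-CPU
 * ring size in pages):
 *
 *      static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *      {
 *              ...
 *      }
 *
 *      struct perf_buffer_opts opts = { .sample_cb = handle_sample };
 *      struct perf_buffer *pb;
 *
 *      pb = perf_buffer__new(bpf_map__fd(map), 8, &opts);
 *      if (IS_ERR(pb))
 *              return PTR_ERR(pb);
 */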

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
                                              struct perf_buffer_params *p)
{
        const char *online_cpus_file = "/sys/devices/system/cpu/online";
        struct bpf_map_info map = {};
        char msg[STRERR_BUFSIZE];
        struct perf_buffer *pb;
        bool *online = NULL;
        __u32 map_info_len;
        int err, i, j, n;

        if (page_cnt & (page_cnt - 1)) {
                pr_warn("page count should be power of two, but is %zu\n",
                        page_cnt);
                return ERR_PTR(-EINVAL);
        }

        map_info_len = sizeof(map);
        err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
        if (err) {
                err = -errno;
                pr_warn("failed to get map info for map FD %d: %s\n",
                        map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
                return ERR_PTR(err);
        }

        if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
                pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
                        map.name);
                return ERR_PTR(-EINVAL);
        }

        pb = calloc(1, sizeof(*pb));
        if (!pb)
                return ERR_PTR(-ENOMEM);

        pb->event_cb = p->event_cb;
        pb->sample_cb = p->sample_cb;
        pb->lost_cb = p->lost_cb;
        pb->ctx = p->ctx;

        pb->page_size = getpagesize();
        pb->mmap_size = pb->page_size * page_cnt;
        pb->map_fd = map_fd;

        pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        if (pb->epoll_fd < 0) {
                err = -errno;
                pr_warn("failed to create epoll instance: %s\n",
                        libbpf_strerror_r(err, msg, sizeof(msg)));
                goto error;
        }

        if (p->cpu_cnt > 0) {
                pb->cpu_cnt = p->cpu_cnt;
        } else {
                pb->cpu_cnt = libbpf_num_possible_cpus();
                if (pb->cpu_cnt < 0) {
                        err = pb->cpu_cnt;
                        goto error;
                }
                if (map.max_entries < pb->cpu_cnt)
                        pb->cpu_cnt = map.max_entries;
        }

        pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
        if (!pb->events) {
                err = -ENOMEM;
                pr_warn("failed to allocate events: out of memory\n");
                goto error;
        }
        pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
        if (!pb->cpu_bufs) {
                err = -ENOMEM;
                pr_warn("failed to allocate buffers: out of memory\n");
                goto error;
        }

        err = parse_cpu_mask_file(online_cpus_file, &online, &n);
        if (err) {
                pr_warn("failed to get online CPU mask: %d\n", err);
                goto error;
        }

        for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
                struct perf_cpu_buf *cpu_buf;
                int cpu, map_key;

                cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
                map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

                /* in case user didn't explicitly request particular CPUs to
                 * be attached to, skip offline/not present CPUs
                 */
                if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
                        continue;

                cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
                if (IS_ERR(cpu_buf)) {
                        err = PTR_ERR(cpu_buf);
                        goto error;
                }

                pb->cpu_bufs[j] = cpu_buf;

                err = bpf_map_update_elem(pb->map_fd, &map_key,
                                          &cpu_buf->fd, 0);
                if (err) {
                        err = -errno;
                        pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
                                cpu, map_key, cpu_buf->fd,
                                libbpf_strerror_r(err, msg, sizeof(msg)));
                        goto error;
                }

                pb->events[j].events = EPOLLIN;
                pb->events[j].data.ptr = cpu_buf;
                if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
                              &pb->events[j]) < 0) {
                        err = -errno;
                        pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
                                cpu, cpu_buf->fd,
                                libbpf_strerror_r(err, msg, sizeof(msg)));
                        goto error;
                }
                j++;
        }
        pb->cpu_cnt = j;
        free(online);

        return pb;

error:
        free(online);
        if (pb)
                perf_buffer__free(pb);
        return ERR_PTR(err);
}

struct perf_sample_raw {
        struct perf_event_header header;
        uint32_t size;
        char data[0];
};

struct perf_sample_lost {
        struct perf_event_header header;
        uint64_t id;
        uint64_t lost;
        uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
        struct perf_cpu_buf *cpu_buf = ctx;
        struct perf_buffer *pb = cpu_buf->pb;
        void *data = e;

        /* user wants full control over parsing perf event */
        if (pb->event_cb)
                return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

        switch (e->type) {
        case PERF_RECORD_SAMPLE: {
                struct perf_sample_raw *s = data;

                if (pb->sample_cb)
                        pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
                break;
        }
        case PERF_RECORD_LOST: {
                struct perf_sample_lost *s = data;

                if (pb->lost_cb)
                        pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
                break;
        }
        default:
                pr_warn("unknown perf sample type %d\n", e->type);
                return LIBBPF_PERF_EVENT_ERROR;
        }
        return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
                                        struct perf_cpu_buf *cpu_buf)
{
        enum bpf_perf_event_ret ret;

        ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
                                         pb->page_size, &cpu_buf->buf,
                                         &cpu_buf->buf_size,
                                         perf_buffer__process_record, cpu_buf);
        if (ret != LIBBPF_PERF_EVENT_CONT)
                return ret;
        return 0;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
        int i, cnt, err;

        cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
        for (i = 0; i < cnt; i++) {
                struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

                err = perf_buffer__process_records(pb, cpu_buf);
                if (err) {
                        pr_warn("error while processing records: %d\n", err);
                        return err;
                }
        }
        return cnt < 0 ? -errno : cnt;
}
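
/* Example polling loop (an illustrative sketch; "exiting" is a
 * hypothetical termination flag, and 100 is the timeout in milliseconds):
 *
 *      while (!exiting) {
 *              err = perf_buffer__poll(pb, 100);
 *              if (err < 0 && err != -EINTR)
 *                      break;
 *      }
 *      perf_buffer__free(pb);
 */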

struct bpf_prog_info_array_desc {
        int     array_offset;   /* e.g. offset of jited_prog_insns */
        int     count_offset;   /* e.g. offset of jited_prog_len */
        int     size_offset;    /* > 0: offset of rec size,
                                 * < 0: fixed size of -size_offset
                                 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
        [BPF_PROG_INFO_JITED_INSNS] = {
                offsetof(struct bpf_prog_info, jited_prog_insns),
                offsetof(struct bpf_prog_info, jited_prog_len),
                -1,
        },
        [BPF_PROG_INFO_XLATED_INSNS] = {
                offsetof(struct bpf_prog_info, xlated_prog_insns),
                offsetof(struct bpf_prog_info, xlated_prog_len),
                -1,
        },
        [BPF_PROG_INFO_MAP_IDS] = {
                offsetof(struct bpf_prog_info, map_ids),
                offsetof(struct bpf_prog_info, nr_map_ids),
                -(int)sizeof(__u32),
        },
        [BPF_PROG_INFO_JITED_KSYMS] = {
                offsetof(struct bpf_prog_info, jited_ksyms),
                offsetof(struct bpf_prog_info, nr_jited_ksyms),
                -(int)sizeof(__u64),
        },
        [BPF_PROG_INFO_JITED_FUNC_LENS] = {
                offsetof(struct bpf_prog_info, jited_func_lens),
                offsetof(struct bpf_prog_info, nr_jited_func_lens),
                -(int)sizeof(__u32),
        },
        [BPF_PROG_INFO_FUNC_INFO] = {
                offsetof(struct bpf_prog_info, func_info),
                offsetof(struct bpf_prog_info, nr_func_info),
                offsetof(struct bpf_prog_info, func_info_rec_size),
        },
        [BPF_PROG_INFO_LINE_INFO] = {
                offsetof(struct bpf_prog_info, line_info),
                offsetof(struct bpf_prog_info, nr_line_info),
                offsetof(struct bpf_prog_info, line_info_rec_size),
        },
        [BPF_PROG_INFO_JITED_LINE_INFO] = {
                offsetof(struct bpf_prog_info, jited_line_info),
                offsetof(struct bpf_prog_info, nr_jited_line_info),
                offsetof(struct bpf_prog_info, jited_line_info_rec_size),
        },
        [BPF_PROG_INFO_PROG_TAGS] = {
                offsetof(struct bpf_prog_info, prog_tags),
                offsetof(struct bpf_prog_info, nr_prog_tags),
                -(int)sizeof(__u8) * BPF_TAG_SIZE,
        },

};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
                                           int offset)
{
        __u32 *array = (__u32 *)info;

        if (offset >= 0)
                return array[offset / sizeof(__u32)];
        return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
                                           int offset)
{
        __u64 *array = (__u64 *)info;

        if (offset >= 0)
                return array[offset / sizeof(__u64)];
        return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
                                         __u32 val)
{
        __u32 *array = (__u32 *)info;

        if (offset >= 0)
                array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
                                         __u64 val)
{
        __u64 *array = (__u64 *)info;

        if (offset >= 0)
                array[offset / sizeof(__u64)] = val;
}
7177
7178struct bpf_prog_info_linear *
7179bpf_program__get_prog_info_linear(int fd, __u64 arrays)
7180{
7181 struct bpf_prog_info_linear *info_linear;
7182 struct bpf_prog_info info = {};
7183 __u32 info_len = sizeof(info);
7184 __u32 data_len = 0;
7185 int i, err;
7186 void *ptr;
7187
7188 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
7189 return ERR_PTR(-EINVAL);
7190
7191 /* step 1: get array dimensions */
7192 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
7193 if (err) {
7194 pr_debug("can't get prog info: %s", strerror(errno));
7195 return ERR_PTR(-EFAULT);
7196 }
7197
7198 /* step 2: calculate total size of all arrays */
7199 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
7200 bool include_array = (arrays & (1UL << i)) > 0;
7201 struct bpf_prog_info_array_desc *desc;
7202 __u32 count, size;
7203
7204 desc = bpf_prog_info_array_desc + i;
7205
7206 /* kernel is too old to support this field */
7207 if (info_len < desc->array_offset + sizeof(__u32) ||
7208 info_len < desc->count_offset + sizeof(__u32) ||
7209 (desc->size_offset > 0 && info_len < desc->size_offset))
7210 include_array = false;
7211
7212 if (!include_array) {
7213 arrays &= ~(1UL << i); /* clear the bit */
7214 continue;
7215 }
7216
7217 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
7218 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
7219
7220 data_len += count * size;
7221 }
7222
7223 /* step 3: allocate contiguous memory */
7224 data_len = roundup(data_len, sizeof(__u64));
7225 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
7226 if (!info_linear)
7227 return ERR_PTR(-ENOMEM);
7228
7229 /* step 4: fill in info_linear->info */
7230 info_linear->arrays = arrays;
7231 memset(&info_linear->info, 0, sizeof(info));
7232 ptr = info_linear->data;
7233
7234 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
7235 struct bpf_prog_info_array_desc *desc;
7236 __u32 count, size;
7237
7238 if ((arrays & (1UL << i)) == 0)
7239 continue;
7240
7241 desc = bpf_prog_info_array_desc + i;
7242 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
7243 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
7244 bpf_prog_info_set_offset_u32(&info_linear->info,
7245 desc->count_offset, count);
7246 bpf_prog_info_set_offset_u32(&info_linear->info,
7247 desc->size_offset, size);
7248 bpf_prog_info_set_offset_u64(&info_linear->info,
7249 desc->array_offset,
7250 ptr_to_u64(ptr));
7251 ptr += count * size;
7252 }
7253
7254 /* step 5: call the syscall again to fetch the requested arrays */
7255 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
7256 if (err) {
7257 pr_debug("can't get prog info: %s\n", strerror(errno));
7258 free(info_linear);
7259 return ERR_PTR(-EFAULT);
7260 }
7261
7262 /* step 6: verify the data */
7263 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
7264 struct bpf_prog_info_array_desc *desc;
7265 __u32 v1, v2;
7266
7267 if ((arrays & (1UL << i)) == 0)
7268 continue;
7269
7270 desc = bpf_prog_info_array_desc + i;
7271 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
7272 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
7273 desc->count_offset);
7274 if (v1 != v2)
Kefeng Wangbe180102019-10-21 13:55:32 +08007275 pr_warn("%s: mismatch in element count\n", __func__);
Song Liu34be16462019-03-11 22:30:38 -07007276
7277 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
7278 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
7279 desc->size_offset);
7280 if (v1 != v2)
Kefeng Wangbe180102019-10-21 13:55:32 +08007281 pr_warn("%s: mismatch in rec size\n", __func__);
Song Liu34be16462019-03-11 22:30:38 -07007282 }
7283
7284 /* step 7: update info_len and data_len */
7285 info_linear->info_len = sizeof(struct bpf_prog_info);
7286 info_linear->data_len = data_len;
7287
7288 return info_linear;
7289}
7290
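/* Illustrative usage sketch (assumed caller code, not libbpf API):
 * fetch only the map IDs of a loaded program in one allocation and
 * print them. prog_fd is assumed to be a valid BPF program FD.
 */
static __attribute__((unused)) void example_print_map_ids(int prog_fd)
{
	struct bpf_prog_info_linear *info_linear;
	__u32 i, *map_ids;

	info_linear = bpf_program__get_prog_info_linear(prog_fd,
					1UL << BPF_PROG_INFO_MAP_IDS);
	if (IS_ERR(info_linear))
		return;

	/* map_ids now points into info_linear->data */
	map_ids = (__u32 *)(uintptr_t)info_linear->info.map_ids;
	for (i = 0; i < info_linear->info.nr_map_ids; i++)
		pr_debug("map id #%u: %u\n", i, map_ids[i]);
	free(info_linear);
}
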
7291void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
7292{
7293 int i;
7294
7295 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
7296 struct bpf_prog_info_array_desc *desc;
7297 __u64 addr, offs;
7298
7299 if ((info_linear->arrays & (1UL << i)) == 0)
7300 continue;
7301
7302 desc = bpf_prog_info_array_desc + i;
7303 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
7304 desc->array_offset);
7305 offs = addr - ptr_to_u64(info_linear->data);
7306 bpf_prog_info_set_offset_u64(&info_linear->info,
7307 desc->array_offset, offs);
7308 }
7309}
7310
7311void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
7312{
7313 int i;
7314
7315 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
7316 struct bpf_prog_info_array_desc *desc;
7317 __u64 addr, offs;
7318
7319 if ((info_linear->arrays & (1UL << i)) == 0)
7320 continue;
7321
7322 desc = bpf_prog_info_array_desc + i;
7323 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
7324 desc->array_offset);
7325 addr = offs + ptr_to_u64(info_linear->data);
7326 bpf_prog_info_set_offset_u64(&info_linear->info,
7327 desc->array_offset, addr);
7328 }
7329}
Hechao Li6446b312019-06-10 17:56:50 -07007330
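/* Illustrative sketch (assumed helper): the two converters above make
 * info_linear relocatable. Turn embedded pointers into offsets before
 * copying the blob elsewhere, then fix pointers back up relative to
 * each copy's own data buffer. dst is assumed to have room for the
 * entire blob.
 */
static __attribute__((unused)) void
example_copy_info_linear(struct bpf_prog_info_linear *src,
			 struct bpf_prog_info_linear *dst)
{
	bpf_program__bpil_addr_to_offs(src);
	memcpy(dst, src, sizeof(*src) + src->data_len);
	bpf_program__bpil_offs_to_addr(src);
	bpf_program__bpil_offs_to_addr(dst);
}
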
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08007331int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
7332{
7333 int err = 0, n, len, start, end = -1;
7334 bool *tmp;
7335
7336 *mask = NULL;
7337 *mask_sz = 0;
7338
7339 /* Each comma-separated substring has the format \d+-\d+ or \d+; ranges are expected in ascending order */
7340 while (*s) {
7341 if (*s == ',' || *s == '\n') {
7342 s++;
7343 continue;
7344 }
7345 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
7346 if (n <= 0 || n > 2) {
7347 pr_warn("Failed to get CPU range %s: %d\n", s, n);
7348 err = -EINVAL;
7349 goto cleanup;
7350 } else if (n == 1) {
7351 end = start;
7352 }
7353 if (start < 0 || start > end) {
7354 pr_warn("Invalid CPU range [%d,%d] in %s\n",
7355 start, end, s);
7356 err = -EINVAL;
7357 goto cleanup;
7358 }
7359 tmp = realloc(*mask, end + 1);
7360 if (!tmp) {
7361 err = -ENOMEM;
7362 goto cleanup;
7363 }
7364 *mask = tmp;
7365 memset(tmp + *mask_sz, 0, start - *mask_sz);
7366 memset(tmp + start, 1, end - start + 1);
7367 *mask_sz = end + 1;
7368 s += len;
7369 }
7370 if (!*mask_sz) {
7371 pr_warn("Empty CPU range\n");
7372 return -EINVAL;
7373 }
7374 return 0;
7375cleanup:
7376 free(*mask);
7377 *mask = NULL;
7378 return err;
7379}
7380
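/* Illustrative usage sketch (assumed helper): parse a kernel-style
 * CPU list such as "0-3,5" and count the CPUs it names.
 */
static __attribute__((unused)) int example_count_cpus(const char *list)
{
	bool *mask;
	int err, i, n, cnt = 0;

	err = parse_cpu_mask_str(list, &mask, &n);
	if (err)
		return err;
	for (i = 0; i < n; i++)
		cnt += mask[i];
	free(mask);
	return cnt;
}
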
7381int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
7382{
7383 int fd, err = 0, len;
7384 char buf[128];
7385
7386 fd = open(fcpu, O_RDONLY);
7387 if (fd < 0) {
7388 err = -errno;
7389 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
7390 return err;
7391 }
7392 len = read(fd, buf, sizeof(buf));
7393 close(fd);
7394 if (len <= 0) {
7395 err = len ? -errno : -EINVAL;
7396 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
7397 return err;
7398 }
7399 if (len >= sizeof(buf)) {
7400 pr_warn("CPU mask is too big in file %s\n", fcpu);
7401 return -E2BIG;
7402 }
7403 buf[len] = '\0';
7404
7405 return parse_cpu_mask_str(buf, mask, mask_sz);
7406}
7407
Hechao Li6446b312019-06-10 17:56:50 -07007408int libbpf_num_possible_cpus(void)
7409{
7410 static const char *fcpu = "/sys/devices/system/cpu/possible";
Hechao Li6446b312019-06-10 17:56:50 -07007411 static int cpus;
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08007412 int err, n, i, tmp_cpus;
7413 bool *mask;
Hechao Li6446b312019-06-10 17:56:50 -07007414
Takshak Chahande56fbc242019-07-31 15:10:55 -07007415 tmp_cpus = READ_ONCE(cpus);
7416 if (tmp_cpus > 0)
7417 return tmp_cpus;
Hechao Li6446b312019-06-10 17:56:50 -07007418
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08007419 err = parse_cpu_mask_file(fcpu, &mask, &n);
7420 if (err)
7421 return err;
Hechao Li6446b312019-06-10 17:56:50 -07007422
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08007423 tmp_cpus = 0;
7424 for (i = 0; i < n; i++) {
7425 if (mask[i])
7426 tmp_cpus++;
Hechao Li6446b312019-06-10 17:56:50 -07007427 }
Andrii Nakryiko6803ee22019-12-11 17:35:48 -08007428 free(mask);
Takshak Chahande56fbc242019-07-31 15:10:55 -07007429
7430 WRITE_ONCE(cpus, tmp_cpus);
7431 return tmp_cpus;
Hechao Li6446b312019-06-10 17:56:50 -07007432}
Andrii Nakryikod66562f2019-12-13 17:43:36 -08007433
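/* Illustrative usage sketch (assumed caller code): size the value
 * buffer for a per-CPU map lookup with libbpf_num_possible_cpus().
 * Per-CPU values are padded to 8 bytes by the kernel, so __u64 slots
 * are safe for values up to that size.
 */
static __attribute__((unused)) int
example_lookup_percpu(int map_fd, const void *key, __u64 **values)
{
	int n = libbpf_num_possible_cpus();

	if (n < 0)
		return n;
	*values = calloc(n, sizeof(__u64));
	if (!*values)
		return -ENOMEM;
	return bpf_map_lookup_elem(map_fd, key, *values);
}
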
7434int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
7435 const struct bpf_object_open_opts *opts)
7436{
7437 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
7438 .object_name = s->name,
7439 );
7440 struct bpf_object *obj;
7441 int i;
7442
7443 /* Attempt to preserve opts->object_name, unless overridden by user
7444 * explicitly. Overwriting the object name for skeletons is discouraged,
7445 * as it breaks global data maps, whose names include the object
7446 * name as a prefix. When the skeleton is generated, bpftool
7447 * assumes that this name will stay the same.
7448 */
7449 if (opts) {
7450 memcpy(&skel_opts, opts, sizeof(*opts));
7451 if (!opts->object_name)
7452 skel_opts.object_name = s->name;
7453 }
7454
7455 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
7456 if (IS_ERR(obj)) {
7457 pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
7458 s->name, PTR_ERR(obj));
7459 return PTR_ERR(obj);
7460 }
7461
7462 *s->obj = obj;
7463
7464 for (i = 0; i < s->map_cnt; i++) {
7465 struct bpf_map **map = s->maps[i].map;
7466 const char *name = s->maps[i].name;
7467 void **mmaped = s->maps[i].mmaped;
7468
7469 *map = bpf_object__find_map_by_name(obj, name);
7470 if (!*map) {
7471 pr_warn("failed to find skeleton map '%s'\n", name);
7472 return -ESRCH;
7473 }
7474
Andrii Nakryiko2ad97d42019-12-13 17:47:09 -08007475 /* externs shouldn't be pre-initialized from user code */
7476 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_EXTERN)
Andrii Nakryikod66562f2019-12-13 17:43:36 -08007477 *mmaped = (*map)->mmaped;
7478 }
7479
7480 for (i = 0; i < s->prog_cnt; i++) {
7481 struct bpf_program **prog = s->progs[i].prog;
7482 const char *name = s->progs[i].name;
7483
7484 *prog = bpf_object__find_program_by_name(obj, name);
7485 if (!*prog) {
7486 pr_warn("failed to find skeleton program '%s'\n", name);
7487 return -ESRCH;
7488 }
7489 }
7490
7491 return 0;
7492}
7493
7494int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
7495{
7496 int i, err;
7497
7498 err = bpf_object__load(*s->obj);
7499 if (err) {
7500 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
7501 return err;
7502 }
7503
7504 for (i = 0; i < s->map_cnt; i++) {
7505 struct bpf_map *map = *s->maps[i].map;
7506 size_t mmap_sz = bpf_map_mmap_sz(map);
7507 int prot, map_fd = bpf_map__fd(map);
7508 void **mmaped = s->maps[i].mmaped;
Andrii Nakryikod66562f2019-12-13 17:43:36 -08007509
7510 if (!mmaped)
7511 continue;
7512
7513 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
7514 *mmaped = NULL;
7515 continue;
7516 }
7517
7518 if (map->def.map_flags & BPF_F_RDONLY_PROG)
7519 prot = PROT_READ;
7520 else
7521 prot = PROT_READ | PROT_WRITE;
7522
7523 /* Remap the anonymous mmap()-ed "map initialization image" as
7524 * BPF map-backed mmap()-ed memory, preserving the same memory
7525 * address. This causes the kernel to change the process's page
7526 * tables to point to a different piece of kernel memory, but from
7527 * the user-space point of view the memory address (and its
7528 * contents, identical at this point) stays the same. This mapping
7529 * is released by bpf_object__close() as part of the normal cleanup
7530 * procedure, so we don't need to worry about it from the
7531 * skeleton's cleanup perspective.
7532 */
Andrii Nakryiko2ad97d42019-12-13 17:47:09 -08007533 *mmaped = mmap(map->mmaped, mmap_sz, prot,
7534 MAP_SHARED | MAP_FIXED, map_fd, 0);
7535 if (*mmaped == MAP_FAILED) {
Andrii Nakryikod66562f2019-12-13 17:43:36 -08007536 err = -errno;
7537 *mmaped = NULL;
7538 pr_warn("failed to re-mmap() map '%s': %d\n",
7539 bpf_map__name(map), err);
7540 return err;
7541 }
7542 }
7543
7544 return 0;
7545}
7546
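/* Illustrative sketch (assumed helper): after a successful
 * bpf_object__load_skeleton(), each mmapable map's `mmaped` pointer
 * gives direct user-space access to the map's contents. Here we just
 * zero every such map through the shared mapping.
 */
static __attribute__((unused)) void
example_zero_mmaped_maps(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped || !*mmaped)
			continue;
		/* writes land directly in BPF map memory */
		memset(*mmaped, 0, bpf_map_mmap_sz(map));
	}
}
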
7547int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
7548{
7549 int i;
7550
7551 for (i = 0; i < s->prog_cnt; i++) {
7552 struct bpf_program *prog = *s->progs[i].prog;
7553 struct bpf_link **link = s->progs[i].link;
7554 const struct bpf_sec_def *sec_def;
7555 const char *sec_name = bpf_program__title(prog, false);
7556
7557 sec_def = find_sec_def(sec_name);
7558 if (!sec_def || !sec_def->attach_fn)
7559 continue;
7560
7561 *link = sec_def->attach_fn(sec_def, prog);
7562 if (IS_ERR(*link)) {
7563 pr_warn("failed to auto-attach program '%s': %ld\n",
7564 bpf_program__name(prog), PTR_ERR(*link));
7565 return PTR_ERR(*link);
7566 }
7567 }
7568
7569 return 0;
7570}
7571
7572void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
7573{
7574 int i;
7575
7576 for (i = 0; i < s->prog_cnt; i++) {
7577 struct bpf_link **link = s->progs[i].link;
7578
7579 if (!IS_ERR_OR_NULL(*link))
7580 bpf_link__destroy(*link);
7581 *link = NULL;
7582 }
7583}
7584
7585void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
7586{
7587 if (s->progs)
7588 bpf_object__detach_skeleton(s);
7589 if (s->obj)
7590 bpf_object__close(*s->obj);
7591 free(s->maps);
7592 free(s->progs);
7593 free(s);
7594}
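
/* Illustrative sketch of the expected call sequence for the skeleton
 * helpers above. In practice the skeleton struct is constructed by
 * code generated with `bpftool gen skeleton`; here `s` is assumed to
 * come from such generated code.
 */
static __attribute__((unused)) int
example_skeleton_lifecycle(struct bpf_object_skeleton *s)
{
	int err;

	err = bpf_object__open_skeleton(s, NULL);
	if (err)
		goto err_out;
	err = bpf_object__load_skeleton(s);
	if (err)
		goto err_out;
	err = bpf_object__attach_skeleton(s);
	if (err)
		goto err_out;
	return 0;
err_out:
	bpf_object__destroy_skeleton(s);
	return err;
}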