// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}
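
/*
 * Illustrative usage sketch (not part of this file): a caller can install
 * its own logger via libbpf_set_print() and gets the previous callback
 * back, so it can be restored later, e.g.:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 *
 * "my_print" and "old_fn" are hypothetical names used only for illustration.
 */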

#define STRERR_BUFSIZE 128

#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
	/* v5.2: kernel support for global data sections. */
	__u32 global_data:1;
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	__u32 btf_func:1;
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	__u32 btf_datasec:1;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
			RELO_DATA,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	__u32 attach_btf_id;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= ".data",
	[LIBBPF_MAP_BSS]	= ".bss",
	[LIBBPF_MAP_RODATA]	= ".rodata",
};

struct bpf_map {
	int fd;
	char *name;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	char *pin_path;
	bool pinned;
};

struct bpf_secdata {
	void *rodata;
	void *data;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;
	struct bpf_secdata sections;

	bool loaded;
	bool has_pseudo_calls;
	bool relaxed_core_relocs;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int btf_maps_shndx;
		int text_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->section_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	const size_t bpf_insn_sz = sizeof(struct bpf_insn);

	if (size == 0 || size % bpf_insn_sz) {
		pr_warn("corrupted section '%s', size: %zu\n",
			section_name, size);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warn("failed to alloc name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name) {
		pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
			idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warn("failed to alloc insns for prog under section %s\n",
			section_name);
		goto errout;
	}
	prog->insns_cnt = size / bpf_insn_sz;
	memcpy(prog->insns, data, size);
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_UNSPEC;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warn("failed to alloc a new program under section '%s'\n",
			section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warn("failed to get sym name string for prog %s\n",
					prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warn("failed to find sym for prog %s\n",
				prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warn("failed to allocate memory for prog sym %s\n",
				name);
			return -ENOMEM;
		}
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("failed to open %s as ELF file\n", obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("failed to get EHDR from %s\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("%s is not an eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx - b->sec_idx;
	return a->sec_offset - b->sec_offset;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int bpf_object_search_section_size(const struct bpf_object *obj,
					  const char *name, size_t *d_size)
{
	const GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf *elf = obj->efile.elf;
	Elf_Scn *scn = NULL;
	int idx = 0;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *sec_name;
		Elf_Data *data;
		GElf_Shdr sh;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, obj->path);
			return -EIO;
		}

		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!sec_name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, obj->path);
			return -EIO;
		}

		if (strcmp(name, sec_name))
			continue;

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warn("failed to get section(%d) data from %s(%s)\n",
				idx, name, obj->path);
			return -EIO;
		}

		*d_size = data->d_size;
		return 0;
	}

	return -ENOENT;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;
	size_t d_size;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, ".data")) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, ".bss")) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, ".rodata")) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else {
		ret = bpf_object_search_section_size(obj, name, &d_size);
		if (!ret)
			*size = d_size;
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				   sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
	 * when failure (zclose won't close negative fd)).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, Elf_Data *data, void **data_buff)
{
	char map_name[BPF_OBJ_NAME_LEN];
	struct bpf_map_def *def;
	struct bpf_map *map;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
		 libbpf_type_to_btf_name[type]);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}
	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data->d_size;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
	if (data_buff) {
		*data_buff = malloc(data->d_size);
		if (!*data_buff) {
			zfree(&map->name);
			pr_warn("failed to alloc map content buffer\n");
			return -ENOMEM;
		}
		memcpy(*data_buff, data->d_buf, data->d_size);
	}

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (!obj->caps.global_data)
		return 0;
	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data,
						    &obj->sections.data);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata,
						    &obj->sections.rodata);
		if (err)
			return err;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    obj->efile.bss, NULL);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warn("failed to get Elf_Data from map section %d\n",
			obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("maps in %s: %d maps in %zd bytes\n",
		 obj->path, nr_maps, data->d_size);

	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warn("unable to determine map definition size "
			"section %s, %d maps in %zd bytes\n",
			obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
				      sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" "
						"has unrecognized, non-zero "
						"options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}
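
/*
 * Illustrative example (assumed, not taken from this file): a legacy map
 * definition as it would appear in a BPF object's "maps" section, which
 * bpf_object__init_user_maps() above discovers through the section's
 * symbols:
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type		= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u64),
 *		.max_entries	= 1024,
 *	};
 *
 * "my_map" is a hypothetical name; any bytes beyond sizeof(struct
 * bpf_map_def) must be zero, as checked in the loop above.
 */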

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_type *def,
			      const struct btf_member *m, __u32 *res) {
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
			map_name, name, btf_kind(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
			map_name, name, btf_kind(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}
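
/*
 * Illustrative example (assumed, not taken from this file): a BTF-defined
 * map in the ".maps" section whose integer attributes use the
 * pointer-to-array encoding decoded by get_map_field_int():
 *
 *	struct {
 *		int (*type)[BPF_MAP_TYPE_ARRAY];
 *		int (*max_entries)[64];
 *		__u32 *key;
 *		struct my_value *value;
 *	} my_map SEC(".maps");
 *
 * "type" and "max_entries" are read as integers (array dimensions), while
 * the "key" and "value" pointers carry the key/value BTF types and sizes.
 * "my_map" and "struct my_value" are hypothetical names.
 */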
Daniel Borkmann8837fe52019-04-24 00:45:56 +02001095
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001096static int build_map_pin_path(struct bpf_map *map, const char *path)
1097{
1098 char buf[PATH_MAX];
1099 int err, len;
1100
1101 if (!path)
1102 path = "/sys/fs/bpf";
1103
1104 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1105 if (len < 0)
1106 return -EINVAL;
1107 else if (len >= PATH_MAX)
1108 return -ENAMETOOLONG;
1109
1110 err = bpf_map__set_pin_path(map, buf);
1111 if (err)
1112 return err;
1113
1114 return 0;
1115}
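
/*
 * For example (illustration only): with a NULL pin_root_path and a map
 * named "my_map", build_map_pin_path() produces "/sys/fs/bpf/my_map".
 */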
1116
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001117static int bpf_object__init_user_btf_map(struct bpf_object *obj,
1118 const struct btf_type *sec,
1119 int var_idx, int sec_idx,
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001120 const Elf_Data *data, bool strict,
1121 const char *pin_root_path)
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001122{
1123 const struct btf_type *var, *def, *t;
1124 const struct btf_var_secinfo *vi;
1125 const struct btf_var *var_extra;
1126 const struct btf_member *m;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001127 const char *map_name;
1128 struct bpf_map *map;
1129 int vlen, i;
1130
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001131 vi = btf_var_secinfos(sec) + var_idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001132 var = btf__type_by_id(obj->btf, vi->type);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001133 var_extra = btf_var(var);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001134 map_name = btf__name_by_offset(obj->btf, var->name_off);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001135 vlen = btf_vlen(var);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001136
1137 if (map_name == NULL || map_name[0] == '\0') {
Kefeng Wangbe180102019-10-21 13:55:32 +08001138 pr_warn("map #%d: empty name.\n", var_idx);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001139 return -EINVAL;
1140 }
1141 if ((__u64)vi->offset + vi->size > data->d_size) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001142 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001143 return -EINVAL;
1144 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001145 if (!btf_is_var(var)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001146 pr_warn("map '%s': unexpected var kind %u.\n",
1147 map_name, btf_kind(var));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001148 return -EINVAL;
1149 }
1150 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
1151 var_extra->linkage != BTF_VAR_STATIC) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001152 pr_warn("map '%s': unsupported var linkage %u.\n",
1153 map_name, var_extra->linkage);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001154 return -EOPNOTSUPP;
1155 }
1156
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07001157 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001158 if (!btf_is_struct(def)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001159 pr_warn("map '%s': unexpected def kind %u.\n",
1160 map_name, btf_kind(var));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001161 return -EINVAL;
1162 }
1163 if (def->size > vi->size) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001164 pr_warn("map '%s': invalid def size.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001165 return -EINVAL;
1166 }
1167
1168 map = bpf_object__add_map(obj);
1169 if (IS_ERR(map))
1170 return PTR_ERR(map);
1171 map->name = strdup(map_name);
1172 if (!map->name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001173 pr_warn("map '%s': failed to alloc map name.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001174 return -ENOMEM;
1175 }
1176 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1177 map->def.type = BPF_MAP_TYPE_UNSPEC;
1178 map->sec_idx = sec_idx;
1179 map->sec_offset = vi->offset;
1180 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
1181 map_name, map->sec_idx, map->sec_offset);
1182
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001183 vlen = btf_vlen(def);
1184 m = btf_members(def);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001185 for (i = 0; i < vlen; i++, m++) {
1186 const char *name = btf__name_by_offset(obj->btf, m->name_off);
1187
1188 if (!name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001189 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001190 return -EINVAL;
1191 }
1192 if (strcmp(name, "type") == 0) {
1193 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001194 &map->def.type))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001195 return -EINVAL;
1196 pr_debug("map '%s': found type = %u.\n",
1197 map_name, map->def.type);
1198 } else if (strcmp(name, "max_entries") == 0) {
1199 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001200 &map->def.max_entries))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001201 return -EINVAL;
1202 pr_debug("map '%s': found max_entries = %u.\n",
1203 map_name, map->def.max_entries);
1204 } else if (strcmp(name, "map_flags") == 0) {
1205 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001206 &map->def.map_flags))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001207 return -EINVAL;
1208 pr_debug("map '%s': found map_flags = %u.\n",
1209 map_name, map->def.map_flags);
1210 } else if (strcmp(name, "key_size") == 0) {
1211 __u32 sz;
1212
1213 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001214 &sz))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001215 return -EINVAL;
1216 pr_debug("map '%s': found key_size = %u.\n",
1217 map_name, sz);
1218 if (map->def.key_size && map->def.key_size != sz) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001219 pr_warn("map '%s': conflicting key size %u != %u.\n",
1220 map_name, map->def.key_size, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001221 return -EINVAL;
1222 }
1223 map->def.key_size = sz;
1224 } else if (strcmp(name, "key") == 0) {
1225 __s64 sz;
1226
1227 t = btf__type_by_id(obj->btf, m->type);
1228 if (!t) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001229 pr_warn("map '%s': key type [%d] not found.\n",
1230 map_name, m->type);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001231 return -EINVAL;
1232 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001233 if (!btf_is_ptr(t)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001234 pr_warn("map '%s': key spec is not PTR: %u.\n",
1235 map_name, btf_kind(t));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001236 return -EINVAL;
1237 }
1238 sz = btf__resolve_size(obj->btf, t->type);
1239 if (sz < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001240 pr_warn("map '%s': can't determine key size for type [%u]: %lld.\n",
1241 map_name, t->type, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001242 return sz;
1243 }
1244 pr_debug("map '%s': found key [%u], sz = %lld.\n",
1245 map_name, t->type, sz);
1246 if (map->def.key_size && map->def.key_size != sz) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001247 pr_warn("map '%s': conflicting key size %u != %lld.\n",
1248 map_name, map->def.key_size, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001249 return -EINVAL;
1250 }
1251 map->def.key_size = sz;
1252 map->btf_key_type_id = t->type;
1253 } else if (strcmp(name, "value_size") == 0) {
1254 __u32 sz;
1255
1256 if (!get_map_field_int(map_name, obj->btf, def, m,
Andrii Nakryikoef99b022019-07-05 08:50:09 -07001257 &sz))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001258 return -EINVAL;
1259 pr_debug("map '%s': found value_size = %u.\n",
1260 map_name, sz);
1261 if (map->def.value_size && map->def.value_size != sz) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001262 pr_warn("map '%s': conflicting value size %u != %u.\n",
1263 map_name, map->def.value_size, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001264 return -EINVAL;
1265 }
1266 map->def.value_size = sz;
1267 } else if (strcmp(name, "value") == 0) {
1268 __s64 sz;
1269
1270 t = btf__type_by_id(obj->btf, m->type);
1271 if (!t) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001272 pr_warn("map '%s': value type [%d] not found.\n",
1273 map_name, m->type);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001274 return -EINVAL;
1275 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001276 if (!btf_is_ptr(t)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001277 pr_warn("map '%s': value spec is not PTR: %u.\n",
1278 map_name, btf_kind(t));
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001279 return -EINVAL;
1280 }
1281 sz = btf__resolve_size(obj->btf, t->type);
1282 if (sz < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001283 pr_warn("map '%s': can't determine value size for type [%u]: %lld.\n",
1284 map_name, t->type, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001285 return sz;
1286 }
1287 pr_debug("map '%s': found value [%u], sz = %lld.\n",
1288 map_name, t->type, sz);
1289 if (map->def.value_size && map->def.value_size != sz) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001290 pr_warn("map '%s': conflicting value size %u != %lld.\n",
1291 map_name, map->def.value_size, sz);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001292 return -EINVAL;
1293 }
1294 map->def.value_size = sz;
1295 map->btf_value_type_id = t->type;
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001296 } else if (strcmp(name, "pinning") == 0) {
1297 __u32 val;
1298 int err;
1299
1300 if (!get_map_field_int(map_name, obj->btf, def, m,
1301 &val))
1302 return -EINVAL;
1303 pr_debug("map '%s': found pinning = %u.\n",
1304 map_name, val);
1305
1306 if (val != LIBBPF_PIN_NONE &&
1307 val != LIBBPF_PIN_BY_NAME) {
1308 pr_warn("map '%s': invalid pinning value %u.\n",
1309 map_name, val);
1310 return -EINVAL;
1311 }
1312 if (val == LIBBPF_PIN_BY_NAME) {
1313 err = build_map_pin_path(map, pin_root_path);
1314 if (err) {
1315 pr_warn("map '%s': couldn't build pin path.\n",
1316 map_name);
1317 return err;
1318 }
1319 }
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001320 } else {
1321 if (strict) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001322 pr_warn("map '%s': unknown field '%s'.\n",
1323 map_name, name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001324 return -ENOTSUP;
1325 }
1326 pr_debug("map '%s': ignoring unknown field '%s'.\n",
1327 map_name, name);
1328 }
1329 }
1330
1331 if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001332 pr_warn("map '%s': map type isn't specified.\n", map_name);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001333 return -EINVAL;
1334 }
1335
1336 return 0;
1337}
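/* Example: a sketch of the kind of BTF-defined map declaration the parser
 * above consumes, assuming the usual __uint()/__type() convenience macros
 * (roughly: __uint(name, val) -> int (*name)[val],
 *           __type(name, val) -> typeof(val) *name):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, struct my_value);	// placeholder value type
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *	} my_map SEC(".maps");			// placeholder map name
 *
 * The "key"/"value" pointer members carry the BTF type IDs (and hence
 * sizes) resolved above; "pinning" selects a pin path under pin_root_path.
 */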
1338
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001339static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
1340 const char *pin_root_path)
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001341{
1342 const struct btf_type *sec = NULL;
1343 int nr_types, i, vlen, err;
1344 const struct btf_type *t;
1345 const char *name;
 1346	Elf_Data *data = NULL;
1347 Elf_Scn *scn;
1348
1349 if (obj->efile.btf_maps_shndx < 0)
1350 return 0;
1351
1352 scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
1353 if (scn)
1354 data = elf_getdata(scn, NULL);
1355 if (!scn || !data) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001356 pr_warn("failed to get Elf_Data from map section %d (%s)\n",
 1357 obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001358 return -EINVAL;
1359 }
1360
1361 nr_types = btf__get_nr_types(obj->btf);
1362 for (i = 1; i <= nr_types; i++) {
1363 t = btf__type_by_id(obj->btf, i);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001364 if (!btf_is_datasec(t))
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001365 continue;
1366 name = btf__name_by_offset(obj->btf, t->name_off);
1367 if (strcmp(name, MAPS_ELF_SEC) == 0) {
1368 sec = t;
1369 break;
1370 }
1371 }
1372
1373 if (!sec) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001374 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001375 return -ENOENT;
1376 }
1377
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001378 vlen = btf_vlen(sec);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001379 for (i = 0; i < vlen; i++) {
1380 err = bpf_object__init_user_btf_map(obj, sec, i,
1381 obj->efile.btf_maps_shndx,
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001382 data, strict, pin_root_path);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001383 if (err)
1384 return err;
1385 }
1386
1387 return 0;
1388}
1389
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001390static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
1391 const char *pin_root_path)
Andrii Nakryikobf829272019-06-17 12:26:53 -07001392{
Andrii Nakryiko291ee022019-10-15 11:28:46 -07001393 bool strict = !relaxed_maps;
Andrii Nakryikobf829272019-06-17 12:26:53 -07001394 int err;
Eric Leblond4708bbd2016-11-15 04:05:47 +00001395
Andrii Nakryikobf829272019-06-17 12:26:53 -07001396 err = bpf_object__init_user_maps(obj, strict);
1397 if (err)
1398 return err;
1399
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001400 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001401 if (err)
1402 return err;
1403
Andrii Nakryikobf829272019-06-17 12:26:53 -07001404 err = bpf_object__init_global_data_maps(obj);
1405 if (err)
1406 return err;
1407
1408 if (obj->nr_maps) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02001409 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
1410 compare_bpf_map);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001411 }
1412 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +00001413}
1414
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01001415static bool section_have_execinstr(struct bpf_object *obj, int idx)
1416{
1417 Elf_Scn *scn;
1418 GElf_Shdr sh;
1419
1420 scn = elf_getscn(obj->efile.elf, idx);
1421 if (!scn)
1422 return false;
1423
1424 if (gelf_getshdr(scn, &sh) != &sh)
1425 return false;
1426
1427 if (sh.sh_flags & SHF_EXECINSTR)
1428 return true;
1429
1430 return false;
1431}
1432
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001433static void bpf_object__sanitize_btf(struct bpf_object *obj)
1434{
1435 bool has_datasec = obj->caps.btf_datasec;
1436 bool has_func = obj->caps.btf_func;
1437 struct btf *btf = obj->btf;
1438 struct btf_type *t;
1439 int i, j, vlen;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001440
1441 if (!obj->btf || (has_func && has_datasec))
1442 return;
1443
1444 for (i = 1; i <= btf__get_nr_types(btf); i++) {
1445 t = (struct btf_type *)btf__type_by_id(btf, i);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001446
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001447 if (!has_datasec && btf_is_var(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001448 /* replace VAR with INT */
1449 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
Andrii Nakryiko1d4126c2019-07-19 12:46:03 -07001450 /*
1451 * using size = 1 is the safest choice, 4 will be too
1452 * big and cause kernel BTF validation failure if
1453 * original variable took less than 4 bytes
1454 */
1455 t->size = 1;
Jakub Kicinski708852d2019-08-13 16:24:57 -07001456 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001457 } else if (!has_datasec && btf_is_datasec(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001458 /* replace DATASEC with STRUCT */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001459 const struct btf_var_secinfo *v = btf_var_secinfos(t);
1460 struct btf_member *m = btf_members(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001461 struct btf_type *vt;
1462 char *name;
1463
1464 name = (char *)btf__name_by_offset(btf, t->name_off);
1465 while (*name) {
1466 if (*name == '.')
1467 *name = '_';
1468 name++;
1469 }
1470
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001471 vlen = btf_vlen(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001472 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
1473 for (j = 0; j < vlen; j++, v++, m++) {
1474 /* order of field assignments is important */
1475 m->offset = v->offset * 8;
1476 m->type = v->type;
1477 /* preserve variable name as member name */
1478 vt = (void *)btf__type_by_id(btf, v->type);
1479 m->name_off = vt->name_off;
1480 }
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001481 } else if (!has_func && btf_is_func_proto(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001482 /* replace FUNC_PROTO with ENUM */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001483 vlen = btf_vlen(t);
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001484 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
1485 t->size = sizeof(__u32); /* kernel enforced */
Andrii Nakryikob03bc682019-08-07 14:39:49 -07001486 } else if (!has_func && btf_is_func(t)) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07001487 /* replace FUNC with TYPEDEF */
1488 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
1489 }
1490 }
1491}
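/* The sanitization above downgrades BTF kinds that older kernels reject
 * into kinds they accept:
 *	VAR        -> 1-byte INT
 *	DATASEC    -> STRUCT ('.' in section names replaced with '_')
 *	FUNC_PROTO -> ENUM with the same vlen
 *	FUNC       -> TYPEDEF
 * It only runs when obj->caps indicates missing BTF func and/or datasec
 * support in the running kernel.
 */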
1492
1493static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
1494{
1495 if (!obj->btf_ext)
1496 return;
1497
1498 if (!obj->caps.btf_func) {
1499 btf_ext__free(obj->btf_ext);
1500 obj->btf_ext = NULL;
1501 }
1502}
1503
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001504static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
1505{
1506 return obj->efile.btf_maps_shndx >= 0;
1507}
1508
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001509static int bpf_object__init_btf(struct bpf_object *obj,
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001510 Elf_Data *btf_data,
1511 Elf_Data *btf_ext_data)
1512{
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001513 bool btf_required = bpf_object__is_btf_mandatory(obj);
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001514 int err = 0;
1515
1516 if (btf_data) {
1517 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
1518 if (IS_ERR(obj->btf)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001519 pr_warn("Error loading ELF section %s: %ld.\n",
 1520 BTF_ELF_SEC, PTR_ERR(obj->btf));
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001521 goto out;
1522 }
1523 err = btf__finalize_data(obj, obj->btf);
1524 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001525 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001526 goto out;
1527 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001528 }
1529 if (btf_ext_data) {
1530 if (!obj->btf) {
 1531 pr_debug("Ignoring ELF section %s: the ELF section %s it depends on is missing.\n",
1532 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
1533 goto out;
1534 }
1535 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
1536 btf_ext_data->d_size);
1537 if (IS_ERR(obj->btf_ext)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001538 pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
1539 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001540 obj->btf_ext = NULL;
1541 goto out;
1542 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001543 }
1544out:
1545 if (err || IS_ERR(obj->btf)) {
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001546 if (btf_required)
1547 err = err ? : PTR_ERR(obj->btf);
1548 else
1549 err = 0;
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001550 if (!IS_ERR_OR_NULL(obj->btf))
1551 btf__free(obj->btf);
1552 obj->btf = NULL;
1553 }
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001554 if (btf_required && !obj->btf) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001555 pr_warn("BTF is required, but is missing or corrupted.\n");
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001556 return err == 0 ? -ENOENT : err;
1557 }
Andrii Nakryiko9c6660d2019-06-17 12:26:51 -07001558 return 0;
1559}
1560
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001561static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
1562{
1563 int err = 0;
1564
1565 if (!obj->btf)
1566 return 0;
1567
1568 bpf_object__sanitize_btf(obj);
1569 bpf_object__sanitize_btf_ext(obj);
1570
1571 err = btf__load(obj->btf);
1572 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001573 pr_warn("Error loading %s into kernel: %d.\n",
1574 BTF_ELF_SEC, err);
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001575 btf__free(obj->btf);
1576 obj->btf = NULL;
Andrii Nakryiko04efe592019-07-19 12:32:42 -07001577 /* btf_ext can't exist without btf, so free it as well */
1578 if (obj->btf_ext) {
1579 btf_ext__free(obj->btf_ext);
1580 obj->btf_ext = NULL;
1581 }
1582
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001583 if (bpf_object__is_btf_mandatory(obj))
1584 return err;
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001585 }
1586 return 0;
1587}
1588
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001589static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
1590 const char *pin_root_path)
Wang Nan29603662015-07-01 02:13:56 +00001591{
1592 Elf *elf = obj->efile.elf;
1593 GElf_Ehdr *ep = &obj->efile.ehdr;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001594 Elf_Data *btf_ext_data = NULL;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001595 Elf_Data *btf_data = NULL;
Wang Nan29603662015-07-01 02:13:56 +00001596 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +00001597 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +00001598
1599 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
1600 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001601 pr_warn("failed to get e_shstrndx from %s\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001602 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001603 }
1604
1605 while ((scn = elf_nextscn(elf, scn)) != NULL) {
1606 char *name;
1607 GElf_Shdr sh;
1608 Elf_Data *data;
1609
1610 idx++;
1611 if (gelf_getshdr(scn, &sh) != &sh) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001612 pr_warn("failed to get section(%d) header from %s\n",
1613 idx, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001614 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001615 }
1616
1617 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1618 if (!name) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001619 pr_warn("failed to get section(%d) name from %s\n",
1620 idx, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001621 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001622 }
1623
1624 data = elf_getdata(scn, 0);
1625 if (!data) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001626 pr_warn("failed to get section(%d) data from %s(%s)\n",
1627 idx, name, obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001628 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +00001629 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001630 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
1631 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +00001632 (int)sh.sh_link, (unsigned long)sh.sh_flags,
1633 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +00001634
Daniel Borkmann1713d682019-04-09 23:20:14 +02001635 if (strcmp(name, "license") == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001636 err = bpf_object__init_license(obj,
1637 data->d_buf,
1638 data->d_size);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001639 if (err)
1640 return err;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001641 } else if (strcmp(name, "version") == 0) {
John Fastabend54b86252019-10-18 07:41:26 -07001642 err = bpf_object__init_kversion(obj,
1643 data->d_buf,
1644 data->d_size);
1645 if (err)
1646 return err;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001647 } else if (strcmp(name, "maps") == 0) {
Wang Nan666810e2016-01-25 09:55:49 +00001648 obj->efile.maps_shndx = idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001649 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
1650 obj->efile.btf_maps_shndx = idx;
Daniel Borkmann1713d682019-04-09 23:20:14 +02001651 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
1652 btf_data = data;
Yonghong Song2993e052018-11-19 15:29:16 -08001653 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08001654 btf_ext_data = data;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001655 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +00001656 if (obj->efile.symbols) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001657 pr_warn("bpf: multiple SYMTAB in %s\n",
1658 obj->path);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001659 return -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00001660 }
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001661 obj->efile.symbols = data;
1662 obj->efile.strtabidx = sh.sh_link;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001663 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
1664 if (sh.sh_flags & SHF_EXECINSTR) {
1665 if (strcmp(name, ".text") == 0)
1666 obj->efile.text_shndx = idx;
1667 err = bpf_object__add_program(obj, data->d_buf,
1668 data->d_size, name, idx);
1669 if (err) {
1670 char errmsg[STRERR_BUFSIZE];
1671 char *cp = libbpf_strerror_r(-err, errmsg,
1672 sizeof(errmsg));
Wang Nan6371ca3b2015-11-06 13:49:37 +00001673
Kefeng Wangbe180102019-10-21 13:55:32 +08001674 pr_warn("failed to alloc program %s (%s): %s\n",
1675 name, obj->path, cp);
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001676 return err;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001677 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001678 } else if (strcmp(name, ".data") == 0) {
1679 obj->efile.data = data;
1680 obj->efile.data_shndx = idx;
1681 } else if (strcmp(name, ".rodata") == 0) {
1682 obj->efile.rodata = data;
1683 obj->efile.rodata_shndx = idx;
1684 } else {
1685 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nana5b8bd42015-07-01 02:14:00 +00001686 }
Wang Nanb62f06e2015-07-01 02:14:01 +00001687 } else if (sh.sh_type == SHT_REL) {
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001688 int nr_reloc = obj->efile.nr_reloc;
Wang Nanb62f06e2015-07-01 02:14:01 +00001689 void *reloc = obj->efile.reloc;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +01001690 int sec = sh.sh_info; /* points to other section */
1691
1692 /* Only do relo for section with exec instructions */
1693 if (!section_have_execinstr(obj, sec)) {
1694 pr_debug("skip relo %s(%d) for section(%d)\n",
1695 name, idx, sec);
1696 continue;
1697 }
Wang Nanb62f06e2015-07-01 02:14:01 +00001698
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001699 reloc = reallocarray(reloc, nr_reloc + 1,
Jakub Kicinski531b0142018-07-10 14:43:05 -07001700 sizeof(*obj->efile.reloc));
Wang Nanb62f06e2015-07-01 02:14:01 +00001701 if (!reloc) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001702 pr_warn("realloc failed\n");
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001703 return -ENOMEM;
Wang Nanb62f06e2015-07-01 02:14:01 +00001704 }
Andrii Nakryiko01b29d12019-06-17 12:26:52 -07001705
1706 obj->efile.reloc = reloc;
1707 obj->efile.nr_reloc++;
1708
1709 obj->efile.reloc[nr_reloc].shdr = sh;
1710 obj->efile.reloc[nr_reloc].data = data;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001711 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
1712 obj->efile.bss = data;
1713 obj->efile.bss_shndx = idx;
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001714 } else {
1715 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +00001716 }
Wang Nan29603662015-07-01 02:13:56 +00001717 }
Wang Nan561bbcc2015-11-27 08:47:36 +00001718
Andrii Nakryikod3a3aa02019-10-28 16:37:27 -07001719 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001720 pr_warn("Corrupted ELF file: index of strtab invalid\n");
Andrii Nakryikof1021542019-05-29 10:36:07 -07001721 return -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +00001722 }
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001723 err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001724 if (!err)
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01001725 err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
Andrii Nakryikobf829272019-06-17 12:26:53 -07001726 if (!err)
Andrii Nakryiko063183b2019-06-17 12:26:55 -07001727 err = bpf_object__sanitize_and_load_btf(obj);
1728 if (!err)
Andrii Nakryikobf829272019-06-17 12:26:53 -07001729 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +00001730 return err;
1731}
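/* Section routing done by bpf_object__elf_collect() above:
 *	"license" / "version"          -> license string / kernel version
 *	"maps"                         -> legacy bpf_map_def maps
 *	MAPS_ELF_SEC (".maps")         -> BTF-defined maps
 *	BTF_ELF_SEC / BTF_EXT_ELF_SEC  -> BTF and BTF.ext blobs
 *	SHT_SYMTAB                     -> symbol table (plus strtab index)
 *	executable SHT_PROGBITS        -> BPF programs (".text" tracked separately)
 *	".data" / ".rodata" / ".bss"   -> global data maps
 *	SHT_REL                        -> relocations for executable sections
 */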
1732
Wang Nan34090912015-07-01 02:14:02 +00001733static struct bpf_program *
1734bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
1735{
1736 struct bpf_program *prog;
1737 size_t i;
1738
1739 for (i = 0; i < obj->nr_programs; i++) {
1740 prog = &obj->programs[i];
1741 if (prog->idx == idx)
1742 return prog;
1743 }
1744 return NULL;
1745}
1746
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07001747struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07001748bpf_object__find_program_by_title(const struct bpf_object *obj,
1749 const char *title)
Jakub Kicinski6d4b1982018-07-26 14:32:19 -07001750{
1751 struct bpf_program *pos;
1752
1753 bpf_object__for_each_program(pos, obj) {
1754 if (pos->section_name && !strcmp(pos->section_name, title))
1755 return pos;
1756 }
1757 return NULL;
1758}
1759
Daniel Borkmannd8599002019-04-09 23:20:13 +02001760static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
1761 int shndx)
1762{
1763 return shndx == obj->efile.data_shndx ||
1764 shndx == obj->efile.bss_shndx ||
1765 shndx == obj->efile.rodata_shndx;
1766}
1767
1768static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
1769 int shndx)
1770{
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001771 return shndx == obj->efile.maps_shndx ||
1772 shndx == obj->efile.btf_maps_shndx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001773}
1774
1775static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
1776 int shndx)
1777{
1778 return shndx == obj->efile.text_shndx ||
1779 bpf_object__shndx_is_maps(obj, shndx) ||
1780 bpf_object__shndx_is_data(obj, shndx);
1781}
1782
1783static enum libbpf_map_type
1784bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
1785{
1786 if (shndx == obj->efile.data_shndx)
1787 return LIBBPF_MAP_DATA;
1788 else if (shndx == obj->efile.bss_shndx)
1789 return LIBBPF_MAP_BSS;
1790 else if (shndx == obj->efile.rodata_shndx)
1791 return LIBBPF_MAP_RODATA;
1792 else
1793 return LIBBPF_MAP_UNSPEC;
1794}
1795
Wang Nan34090912015-07-01 02:14:02 +00001796static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001797bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1798 Elf_Data *data, struct bpf_object *obj)
Wang Nan34090912015-07-01 02:14:02 +00001799{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001800 Elf_Data *symbols = obj->efile.symbols;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001801 struct bpf_map *maps = obj->maps;
1802 size_t nr_maps = obj->nr_maps;
Wang Nan34090912015-07-01 02:14:02 +00001803 int i, nrels;
1804
Andrii Nakryiko399dc652019-05-29 10:36:11 -07001805 pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
Wang Nan34090912015-07-01 02:14:02 +00001806 nrels = shdr->sh_size / shdr->sh_entsize;
1807
1808 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
1809 if (!prog->reloc_desc) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001810 pr_warn("failed to alloc memory in relocation\n");
Wang Nan34090912015-07-01 02:14:02 +00001811 return -ENOMEM;
1812 }
1813 prog->nr_reloc = nrels;
1814
1815 for (i = 0; i < nrels; i++) {
Wang Nan34090912015-07-01 02:14:02 +00001816 struct bpf_insn *insns = prog->insns;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001817 enum libbpf_map_type type;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001818 unsigned int insn_idx;
1819 unsigned int shdr_idx;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001820 const char *name;
Wang Nan34090912015-07-01 02:14:02 +00001821 size_t map_idx;
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001822 GElf_Sym sym;
1823 GElf_Rel rel;
Wang Nan34090912015-07-01 02:14:02 +00001824
1825 if (!gelf_getrel(data, i, &rel)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001826 pr_warn("relocation: failed to get %d reloc\n", i);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001827 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00001828 }
1829
Andrii Nakryiko399dc652019-05-29 10:36:11 -07001830 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001831 pr_warn("relocation: symbol %"PRIx64" not found\n",
1832 GELF_R_SYM(rel.r_info));
Wang Nan6371ca3b2015-11-06 13:49:37 +00001833 return -LIBBPF_ERRNO__FORMAT;
Wang Nan34090912015-07-01 02:14:02 +00001834 }
1835
Daniel Borkmannd8599002019-04-09 23:20:13 +02001836 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1837 sym.st_name) ? : "<?>";
1838
1839 pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
1840 (long long) (rel.r_info >> 32),
1841 (long long) sym.st_value, sym.st_name, name);
1842
1843 shdr_idx = sym.st_shndx;
Andrii Nakryikof2a3e4e2019-07-23 14:11:33 -07001844 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1845 pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
1846 insn_idx, shdr_idx);
1847
1848 if (shdr_idx >= SHN_LORESERVE) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001849 pr_warn("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
1850 name, shdr_idx, insn_idx,
1851 insns[insn_idx].code);
Andrii Nakryikof2a3e4e2019-07-23 14:11:33 -07001852 return -LIBBPF_ERRNO__RELOC;
1853 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001854 if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001855 pr_warn("Program '%s' contains unrecognized relo data pointing to section %u\n",
1856 prog->section_name, shdr_idx);
Wang Nan666810e2016-01-25 09:55:49 +00001857 return -LIBBPF_ERRNO__RELOC;
1858 }
1859
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001860 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1861 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001862 pr_warn("incorrect bpf_call opcode\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001863 return -LIBBPF_ERRNO__RELOC;
1864 }
1865 prog->reloc_desc[i].type = RELO_CALL;
1866 prog->reloc_desc[i].insn_idx = insn_idx;
1867 prog->reloc_desc[i].text_off = sym.st_value;
Jakub Kicinski9a94f272018-06-28 14:41:38 -07001868 obj->has_pseudo_calls = true;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001869 continue;
1870 }
1871
Wang Nan34090912015-07-01 02:14:02 +00001872 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001873 pr_warn("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1874 insn_idx, insns[insn_idx].code);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001875 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001876 }
1877
Daniel Borkmannd8599002019-04-09 23:20:13 +02001878 if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
1879 bpf_object__shndx_is_data(obj, shdr_idx)) {
1880 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02001881 if (type != LIBBPF_MAP_UNSPEC) {
1882 if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001883 pr_warn("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
1884 name, insn_idx, insns[insn_idx].code);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02001885 return -LIBBPF_ERRNO__RELOC;
1886 }
1887 if (!obj->caps.global_data) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001888 pr_warn("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
1889 name, insn_idx);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02001890 return -LIBBPF_ERRNO__RELOC;
1891 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02001892 }
1893
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001894 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02001895 if (maps[map_idx].libbpf_type != type)
1896 continue;
1897 if (type != LIBBPF_MAP_UNSPEC ||
Andrii Nakryikodb488142019-06-17 12:26:54 -07001898 (maps[map_idx].sec_idx == sym.st_shndx &&
1899 maps[map_idx].sec_offset == sym.st_value)) {
1900 pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
1901 map_idx, maps[map_idx].name,
1902 maps[map_idx].sec_idx,
1903 maps[map_idx].sec_offset,
1904 insn_idx);
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001905 break;
1906 }
Joe Stringer94e5ade2017-01-22 17:11:22 -08001907 }
Joe Stringer94e5ade2017-01-22 17:11:22 -08001908
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001909 if (map_idx >= nr_maps) {
Kefeng Wangbe180102019-10-21 13:55:32 +08001910 pr_warn("bpf relocation: map_idx %d larger than %d\n",
1911 (int)map_idx, (int)nr_maps - 1);
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001912 return -LIBBPF_ERRNO__RELOC;
1913 }
Wang Nan34090912015-07-01 02:14:02 +00001914
Daniel Borkmannd8599002019-04-09 23:20:13 +02001915 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
1916 RELO_DATA : RELO_LD64;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02001917 prog->reloc_desc[i].insn_idx = insn_idx;
1918 prog->reloc_desc[i].map_idx = map_idx;
1919 }
Wang Nan34090912015-07-01 02:14:02 +00001920 }
1921 return 0;
1922}
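/* The collector above only records relocations; patching happens later.
 * Three kinds are produced: RELO_CALL for bpf-to-bpf calls
 * (BPF_JMP | BPF_CALL with BPF_PSEUDO_CALL src_reg), and
 * RELO_LD64 / RELO_DATA for BPF_LD | BPF_IMM | BPF_DW instructions that
 * reference a map definition or a global data section, respectively.
 */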
1923
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001924static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001925{
1926 struct bpf_map_def *def = &map->def;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001927 __u32 key_type_id = 0, value_type_id = 0;
Yonghong Song96408c42019-02-04 11:00:58 -08001928 int ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001929
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001930 /* if it's BTF-defined map, we don't need to search for type IDs */
1931 if (map->sec_idx == obj->efile.btf_maps_shndx)
1932 return 0;
1933
Daniel Borkmannd8599002019-04-09 23:20:13 +02001934 if (!bpf_map__is_internal(map)) {
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001935 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
Daniel Borkmannd8599002019-04-09 23:20:13 +02001936 def->value_size, &key_type_id,
1937 &value_type_id);
1938 } else {
1939 /*
1940 * LLVM annotates global data differently in BTF, that is,
1941 * only as '.data', '.bss' or '.rodata'.
1942 */
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07001943 ret = btf__find_by_name(obj->btf,
Daniel Borkmannd8599002019-04-09 23:20:13 +02001944 libbpf_type_to_btf_name[map->libbpf_type]);
1945 }
1946 if (ret < 0)
Yonghong Song96408c42019-02-04 11:00:58 -08001947 return ret;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001948
Yonghong Song96408c42019-02-04 11:00:58 -08001949 map->btf_key_type_id = key_type_id;
Daniel Borkmannd8599002019-04-09 23:20:13 +02001950 map->btf_value_type_id = bpf_map__is_internal(map) ?
1951 ret : value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001952 return 0;
1953}
1954
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001955int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1956{
1957 struct bpf_map_info info = {};
1958 __u32 len = sizeof(info);
1959 int new_fd, err;
1960 char *new_name;
1961
1962 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1963 if (err)
1964 return err;
1965
1966 new_name = strdup(info.name);
1967 if (!new_name)
1968 return -errno;
1969
1970 new_fd = open("/", O_RDONLY | O_CLOEXEC);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01001971 if (new_fd < 0) {
1972 err = -errno;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001973 goto err_free_new_name;
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01001974 }
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001975
1976 new_fd = dup3(fd, new_fd, O_CLOEXEC);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01001977 if (new_fd < 0) {
1978 err = -errno;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001979 goto err_close_new_fd;
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01001980 }
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001981
1982 err = zclose(map->fd);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01001983 if (err) {
1984 err = -errno;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001985 goto err_close_new_fd;
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01001986 }
Jakub Kicinski26736eb2018-07-10 14:43:06 -07001987 free(map->name);
1988
1989 map->fd = new_fd;
1990 map->name = new_name;
1991 map->def.type = info.type;
1992 map->def.key_size = info.key_size;
1993 map->def.value_size = info.value_size;
1994 map->def.max_entries = info.max_entries;
1995 map->def.map_flags = info.map_flags;
1996 map->btf_key_type_id = info.btf_key_type_id;
1997 map->btf_value_type_id = info.btf_value_type_id;
1998
1999 return 0;
2000
2001err_close_new_fd:
2002 close(new_fd);
2003err_free_new_name:
2004 free(new_name);
Toke Høiland-Jørgensend1b45742019-11-02 12:09:37 +01002005 return err;
Jakub Kicinski26736eb2018-07-10 14:43:06 -07002006}
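/* Example usage (a sketch; the map name and pin path are placeholders):
 * replace a not-yet-created map with an already pinned one before load,
 * so bpf_object__load() skips creating it:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (map && pin_fd >= 0)
 *		bpf_map__reuse_fd(map, pin_fd);
 */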
2007
Andrey Ignatov1a11a4c2019-02-14 15:01:42 -08002008int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
2009{
2010 if (!map || !max_entries)
2011 return -EINVAL;
2012
2013 /* If map already created, its attributes can't be changed. */
2014 if (map->fd >= 0)
2015 return -EBUSY;
2016
2017 map->def.max_entries = max_entries;
2018
2019 return 0;
2020}
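/* Example (a sketch; "my_map" is a placeholder): resizing is only allowed
 * between bpf_object__open() and bpf_object__load(), while map->fd < 0:
 *
 *	bpf_map__resize(bpf_object__find_map_by_name(obj, "my_map"), 8192);
 */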
2021
Wang Nan52d33522015-07-01 02:14:04 +00002022static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002023bpf_object__probe_name(struct bpf_object *obj)
2024{
2025 struct bpf_load_program_attr attr;
2026 char *cp, errmsg[STRERR_BUFSIZE];
2027 struct bpf_insn insns[] = {
2028 BPF_MOV64_IMM(BPF_REG_0, 0),
2029 BPF_EXIT_INSN(),
2030 };
2031 int ret;
2032
2033 /* make sure basic loading works */
2034
2035 memset(&attr, 0, sizeof(attr));
2036 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
2037 attr.insns = insns;
2038 attr.insns_cnt = ARRAY_SIZE(insns);
2039 attr.license = "GPL";
2040
2041 ret = bpf_load_program_xattr(&attr, NULL, 0);
2042 if (ret < 0) {
2043 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002044 pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
2045 __func__, cp, errno);
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002046 return -errno;
2047 }
2048 close(ret);
2049
2050 /* now try the same program, but with the name */
2051
2052 attr.name = "test";
2053 ret = bpf_load_program_xattr(&attr, NULL, 0);
2054 if (ret >= 0) {
2055 obj->caps.name = 1;
2056 close(ret);
2057 }
2058
2059 return 0;
2060}
2061
2062static int
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002063bpf_object__probe_global_data(struct bpf_object *obj)
2064{
2065 struct bpf_load_program_attr prg_attr;
2066 struct bpf_create_map_attr map_attr;
2067 char *cp, errmsg[STRERR_BUFSIZE];
2068 struct bpf_insn insns[] = {
2069 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
2070 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
2071 BPF_MOV64_IMM(BPF_REG_0, 0),
2072 BPF_EXIT_INSN(),
2073 };
2074 int ret, map;
2075
2076 memset(&map_attr, 0, sizeof(map_attr));
2077 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
2078 map_attr.key_size = sizeof(int);
2079 map_attr.value_size = 32;
2080 map_attr.max_entries = 1;
2081
2082 map = bpf_create_map_xattr(&map_attr);
2083 if (map < 0) {
2084 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002085 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
2086 __func__, cp, errno);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002087 return -errno;
2088 }
2089
2090 insns[0].imm = map;
2091
2092 memset(&prg_attr, 0, sizeof(prg_attr));
2093 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
2094 prg_attr.insns = insns;
2095 prg_attr.insns_cnt = ARRAY_SIZE(insns);
2096 prg_attr.license = "GPL";
2097
2098 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
2099 if (ret >= 0) {
2100 obj->caps.global_data = 1;
2101 close(ret);
2102 }
2103
2104 close(map);
2105 return 0;
2106}
2107
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002108static int bpf_object__probe_btf_func(struct bpf_object *obj)
2109{
2110 const char strs[] = "\0int\0x\0a";
2111 /* void x(int a) {} */
2112 __u32 types[] = {
2113 /* int */
2114 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2115 /* FUNC_PROTO */ /* [2] */
2116 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
2117 BTF_PARAM_ENC(7, 1),
2118 /* FUNC x */ /* [3] */
2119 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
2120 };
Michal Rosteckicfd49212019-05-29 20:31:09 +02002121 int btf_fd;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002122
Michal Rosteckicfd49212019-05-29 20:31:09 +02002123 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2124 strs, sizeof(strs));
2125 if (btf_fd >= 0) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002126 obj->caps.btf_func = 1;
Michal Rosteckicfd49212019-05-29 20:31:09 +02002127 close(btf_fd);
2128 return 1;
2129 }
2130
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002131 return 0;
2132}
2133
2134static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
2135{
2136 const char strs[] = "\0x\0.data";
 2137 /* static int x; (placed in a ".data" DATASEC) */
2138 __u32 types[] = {
2139 /* int */
2140 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2141 /* VAR x */ /* [2] */
2142 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
2143 BTF_VAR_STATIC,
2144 /* DATASEC val */ /* [3] */
2145 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
2146 BTF_VAR_SECINFO_ENC(2, 0, 4),
2147 };
Michal Rosteckicfd49212019-05-29 20:31:09 +02002148 int btf_fd;
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002149
Michal Rosteckicfd49212019-05-29 20:31:09 +02002150 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2151 strs, sizeof(strs));
2152 if (btf_fd >= 0) {
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002153 obj->caps.btf_datasec = 1;
Michal Rosteckicfd49212019-05-29 20:31:09 +02002154 close(btf_fd);
2155 return 1;
2156 }
2157
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002158 return 0;
2159}
2160
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002161static int
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002162bpf_object__probe_caps(struct bpf_object *obj)
2163{
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002164 int (*probe_fn[])(struct bpf_object *obj) = {
2165 bpf_object__probe_name,
2166 bpf_object__probe_global_data,
Andrii Nakryikod7c4b392019-05-10 14:13:15 -07002167 bpf_object__probe_btf_func,
2168 bpf_object__probe_btf_datasec,
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002169 };
2170 int i, ret;
2171
2172 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
2173 ret = probe_fn[i](obj);
2174 if (ret < 0)
Stanislav Fomichev15ea1642019-05-14 20:38:49 -07002175 pr_debug("Probe #%d failed with %d.\n", i, ret);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02002176 }
2177
2178 return 0;
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002179}
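/* These probes are best-effort feature detection: each loads a minimal
 * program, map, or raw BTF blob and records success in obj->caps;
 * failures are only reported via pr_debug(). The cached capabilities
 * later gate map naming, global data map relocations, and the BTF
 * sanitization above.
 */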
2180
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002181static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
2182{
2183 struct bpf_map_info map_info = {};
2184 char msg[STRERR_BUFSIZE];
2185 __u32 map_info_len;
2186
2187 map_info_len = sizeof(map_info);
2188
2189 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
2190 pr_warn("failed to get map info for map FD %d: %s\n",
2191 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
2192 return false;
2193 }
2194
2195 return (map_info.type == map->def.type &&
2196 map_info.key_size == map->def.key_size &&
2197 map_info.value_size == map->def.value_size &&
2198 map_info.max_entries == map->def.max_entries &&
2199 map_info.map_flags == map->def.map_flags);
2200}
2201
2202static int
2203bpf_object__reuse_map(struct bpf_map *map)
2204{
2205 char *cp, errmsg[STRERR_BUFSIZE];
2206 int err, pin_fd;
2207
2208 pin_fd = bpf_obj_get(map->pin_path);
2209 if (pin_fd < 0) {
2210 err = -errno;
2211 if (err == -ENOENT) {
2212 pr_debug("found no pinned map to reuse at '%s'\n",
2213 map->pin_path);
2214 return 0;
2215 }
2216
2217 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
2218 pr_warn("couldn't retrieve pinned map '%s': %s\n",
2219 map->pin_path, cp);
2220 return err;
2221 }
2222
2223 if (!map_is_reuse_compat(map, pin_fd)) {
2224 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
2225 map->pin_path);
2226 close(pin_fd);
2227 return -EINVAL;
2228 }
2229
2230 err = bpf_map__reuse_fd(map, pin_fd);
2231 if (err) {
2232 close(pin_fd);
2233 return err;
2234 }
2235 map->pinned = true;
2236 pr_debug("reused pinned map at '%s'\n", map->pin_path);
2237
2238 return 0;
2239}
2240
Stanislav Fomichev47eff612018-11-20 17:11:19 -08002241static int
Daniel Borkmannd8599002019-04-09 23:20:13 +02002242bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
2243{
2244 char *cp, errmsg[STRERR_BUFSIZE];
2245 int err, zero = 0;
2246 __u8 *data;
2247
2248 /* Nothing to do here since kernel already zero-initializes .bss map. */
2249 if (map->libbpf_type == LIBBPF_MAP_BSS)
2250 return 0;
2251
2252 data = map->libbpf_type == LIBBPF_MAP_DATA ?
2253 obj->sections.data : obj->sections.rodata;
2254
2255 err = bpf_map_update_elem(map->fd, &zero, data, 0);
2256 /* Freeze .rodata map as read-only from syscall side. */
2257 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
2258 err = bpf_map_freeze(map->fd);
2259 if (err) {
2260 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002261 pr_warn("Error freezing map(%s) as read-only: %s\n",
2262 map->name, cp);
Daniel Borkmannd8599002019-04-09 23:20:13 +02002263 err = 0;
2264 }
2265 }
2266 return err;
2267}
2268
2269static int
Wang Nan52d33522015-07-01 02:14:04 +00002270bpf_object__create_maps(struct bpf_object *obj)
2271{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002272 struct bpf_create_map_attr create_attr = {};
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002273 int nr_cpus = 0;
Wang Nan52d33522015-07-01 02:14:04 +00002274 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002275 int err;
Wang Nan52d33522015-07-01 02:14:04 +00002276
Wang Nan9d759a92015-11-27 08:47:35 +00002277 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002278 struct bpf_map *map = &obj->maps[i];
2279 struct bpf_map_def *def = &map->def;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02002280 char *cp, errmsg[STRERR_BUFSIZE];
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002281 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00002282
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002283 if (map->pin_path) {
2284 err = bpf_object__reuse_map(map);
2285 if (err) {
2286 pr_warn("error reusing pinned map %s\n",
2287 map->name);
2288 return err;
2289 }
2290 }
2291
Jakub Kicinski26736eb2018-07-10 14:43:06 -07002292 if (map->fd >= 0) {
2293 pr_debug("skip map create (preset) %s: fd=%d\n",
2294 map->name, map->fd);
2295 continue;
2296 }
2297
Stanislav Fomichev94cb3102018-11-20 17:11:20 -08002298 if (obj->caps.name)
2299 create_attr.name = map->name;
David Beckettf0307a72018-05-16 14:02:49 -07002300 create_attr.map_ifindex = map->map_ifindex;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002301 create_attr.map_type = def->type;
2302 create_attr.map_flags = def->map_flags;
2303 create_attr.key_size = def->key_size;
2304 create_attr.value_size = def->value_size;
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002305 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
2306 !def->max_entries) {
2307 if (!nr_cpus)
2308 nr_cpus = libbpf_num_possible_cpus();
2309 if (nr_cpus < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002310 pr_warn("failed to determine number of system CPUs: %d\n",
2311 nr_cpus);
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002312 err = nr_cpus;
2313 goto err_out;
2314 }
2315 pr_debug("map '%s': setting size to %d\n",
2316 map->name, nr_cpus);
2317 create_attr.max_entries = nr_cpus;
2318 } else {
2319 create_attr.max_entries = def->max_entries;
2320 }
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002321 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002322 create_attr.btf_key_type_id = 0;
2323 create_attr.btf_value_type_id = 0;
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08002324 if (bpf_map_type__is_map_in_map(def->type) &&
2325 map->inner_map_fd >= 0)
2326 create_attr.inner_map_fd = map->inner_map_fd;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002327
Andrii Nakryikoabd29c92019-06-17 12:26:56 -07002328 if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002329 create_attr.btf_fd = btf__fd(obj->btf);
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002330 create_attr.btf_key_type_id = map->btf_key_type_id;
2331 create_attr.btf_value_type_id = map->btf_value_type_id;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002332 }
2333
2334 *pfd = bpf_create_map_xattr(&create_attr);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002335 if (*pfd < 0 && (create_attr.btf_key_type_id ||
2336 create_attr.btf_value_type_id)) {
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002337 err = -errno;
2338 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002339 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
2340 map->name, cp, err);
Andrii Nakryikoe55d54f2019-06-12 22:04:57 -07002341 create_attr.btf_fd = 0;
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07002342 create_attr.btf_key_type_id = 0;
2343 create_attr.btf_value_type_id = 0;
2344 map->btf_key_type_id = 0;
2345 map->btf_value_type_id = 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002346 *pfd = bpf_create_map_xattr(&create_attr);
2347 }
2348
Wang Nan52d33522015-07-01 02:14:04 +00002349 if (*pfd < 0) {
2350 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00002351
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002352 err = -errno;
Daniel Borkmannd8599002019-04-09 23:20:13 +02002353err_out:
Andrii Nakryikod7ff34d2019-07-06 11:06:25 -07002354 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08002355 pr_warn("failed to create map (name: '%s'): %s(%d)\n",
2356 map->name, cp, err);
Wang Nan52d33522015-07-01 02:14:04 +00002357 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00002358 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00002359 return err;
2360 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02002361
2362 if (bpf_map__is_internal(map)) {
2363 err = bpf_object__populate_internal_map(obj, map);
2364 if (err < 0) {
2365 zclose(*pfd);
2366 goto err_out;
2367 }
2368 }
2369
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01002370 if (map->pin_path && !map->pinned) {
2371 err = bpf_map__pin(map, NULL);
2372 if (err) {
2373 pr_warn("failed to auto-pin map name '%s' at '%s'\n",
2374 map->name, map->pin_path);
2375 return err;
2376 }
2377 }
2378
Andrii Nakryiko76e10222019-05-29 10:36:10 -07002379 pr_debug("created map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00002380 }
2381
Wang Nan52d33522015-07-01 02:14:04 +00002382 return 0;
2383}
2384
Wang Nan8a47a6c2015-07-01 02:14:05 +00002385static int
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002386check_btf_ext_reloc_err(struct bpf_program *prog, int err,
2387 void *btf_prog_info, const char *info_name)
2388{
2389 if (err != -ENOENT) {
Kefeng Wangbe180102019-10-21 13:55:32 +08002390 pr_warn("Error in loading %s for sec %s.\n",
2391 info_name, prog->section_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002392 return err;
2393 }
2394
2395 /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
2396
2397 if (btf_prog_info) {
2398 /*
 2399 * Some info has already been found, but there was a problem
Andrii Nakryiko399dc652019-05-29 10:36:11 -07002400 * with the last btf_ext reloc. Must error out.
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002401 */
Kefeng Wangbe180102019-10-21 13:55:32 +08002402 pr_warn("Error in relocating %s for sec %s.\n",
2403 info_name, prog->section_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002404 return err;
2405 }
2406
Andrii Nakryiko399dc652019-05-29 10:36:11 -07002407 /* There was a problem loading the very first info. Ignore the rest. */
Kefeng Wangbe180102019-10-21 13:55:32 +08002408 pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
2409 info_name, prog->section_name, info_name);
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002410 return 0;
2411}
2412
2413static int
2414bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2415 const char *section_name, __u32 insn_offset)
2416{
2417 int err;
2418
2419 if (!insn_offset || prog->func_info) {
2420 /*
2421 * !insn_offset => main program
2422 *
2423 * For sub prog, the main program's func_info has to
2424 * be loaded first (i.e. prog->func_info != NULL)
2425 */
2426 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
2427 section_name, insn_offset,
2428 &prog->func_info,
2429 &prog->func_info_cnt);
2430 if (err)
2431 return check_btf_ext_reloc_err(prog, err,
2432 prog->func_info,
2433 "bpf_func_info");
2434
2435 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
2436 }
2437
Martin KaFai Lau3d650142018-12-07 16:42:31 -08002438 if (!insn_offset || prog->line_info) {
2439 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
2440 section_name, insn_offset,
2441 &prog->line_info,
2442 &prog->line_info_cnt);
2443 if (err)
2444 return check_btf_ext_reloc_err(prog, err,
2445 prog->line_info,
2446 "bpf_line_info");
2447
2448 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2449 }
2450
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08002451 return 0;
2452}
2453
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002454#define BPF_CORE_SPEC_MAX_LEN 64
2455
2456/* represents BPF CO-RE field or array element accessor */
2457struct bpf_core_accessor {
2458 __u32 type_id; /* struct/union type or array element type */
2459 __u32 idx; /* field index or array index */
2460 const char *name; /* field name or NULL for array accessor */
2461};
2462
2463struct bpf_core_spec {
2464 const struct btf *btf;
2465 /* high-level spec: named fields and array indices only */
2466 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
2467 /* high-level spec length */
2468 int len;
2469 /* raw, low-level spec: 1-to-1 with accessor spec string */
2470 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
2471 /* raw spec length */
2472 int raw_len;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002473 /* field bit offset represented by spec */
2474 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002475};
2476
2477static bool str_is_empty(const char *s)
2478{
2479 return !s || !s[0];
2480}
2481
2482/*
Andrii Nakryiko511bb002019-10-15 11:28:45 -07002483 * Turn bpf_field_reloc into a low- and high-level spec representation,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002484 * validating correctness along the way, as well as calculating resulting
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002485 * field bit offset, specified by accessor string. Low-level spec captures
2486 * every single level of nestedness, including traversing anonymous
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002487 * struct/union members. High-level one only captures semantically meaningful
 2488 * "turning points": named fields and array indices.
2489 * E.g., for this case:
2490 *
2491 * struct sample {
2492 * int __unimportant;
2493 * struct {
2494 * int __1;
2495 * int __2;
2496 * int a[7];
2497 * };
2498 * };
2499 *
2500 * struct sample *s = ...;
2501 *
 2502 * int *x = &s->a[3]; // access string = '0:1:2:3'
2503 *
2504 * Low-level spec has 1:1 mapping with each element of access string (it's
2505 * just a parsed access string representation): [0, 1, 2, 3].
2506 *
2507 * High-level spec will capture only 3 points:
 2508 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
2509 * - field 'a' access (corresponds to '2' in low-level spec);
2510 * - array element #3 access (corresponds to '3' in low-level spec).
2511 *
2512 */
2513static int bpf_core_spec_parse(const struct btf *btf,
2514 __u32 type_id,
2515 const char *spec_str,
2516 struct bpf_core_spec *spec)
2517{
2518 int access_idx, parsed_len, i;
2519 const struct btf_type *t;
2520 const char *name;
2521 __u32 id;
2522 __s64 sz;
2523
2524 if (str_is_empty(spec_str) || *spec_str == ':')
2525 return -EINVAL;
2526
2527 memset(spec, 0, sizeof(*spec));
2528 spec->btf = btf;
2529
2530 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
2531 while (*spec_str) {
2532 if (*spec_str == ':')
2533 ++spec_str;
2534 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
2535 return -EINVAL;
2536 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2537 return -E2BIG;
2538 spec_str += parsed_len;
2539 spec->raw_spec[spec->raw_len++] = access_idx;
2540 }
2541
2542 if (spec->raw_len == 0)
2543 return -EINVAL;
2544
2545 /* first spec value is always reloc type array index */
2546 t = skip_mods_and_typedefs(btf, type_id, &id);
2547 if (!t)
2548 return -EINVAL;
2549
2550 access_idx = spec->raw_spec[0];
2551 spec->spec[0].type_id = id;
2552 spec->spec[0].idx = access_idx;
2553 spec->len++;
2554
2555 sz = btf__resolve_size(btf, id);
2556 if (sz < 0)
2557 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002558 spec->bit_offset = access_idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002559
2560 for (i = 1; i < spec->raw_len; i++) {
2561 t = skip_mods_and_typedefs(btf, id, &id);
2562 if (!t)
2563 return -EINVAL;
2564
2565 access_idx = spec->raw_spec[i];
2566
2567 if (btf_is_composite(t)) {
2568 const struct btf_member *m;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002569 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002570
2571 if (access_idx >= btf_vlen(t))
2572 return -EINVAL;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002573
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002574 bit_offset = btf_member_bit_offset(t, access_idx);
2575 spec->bit_offset += bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002576
2577 m = btf_members(t) + access_idx;
2578 if (m->name_off) {
2579 name = btf__name_by_offset(btf, m->name_off);
2580 if (str_is_empty(name))
2581 return -EINVAL;
2582
2583 spec->spec[spec->len].type_id = id;
2584 spec->spec[spec->len].idx = access_idx;
2585 spec->spec[spec->len].name = name;
2586 spec->len++;
2587 }
2588
2589 id = m->type;
2590 } else if (btf_is_array(t)) {
2591 const struct btf_array *a = btf_array(t);
2592
2593 t = skip_mods_and_typedefs(btf, a->type, &id);
2594 if (!t || access_idx >= a->nelems)
2595 return -EINVAL;
2596
2597 spec->spec[spec->len].type_id = id;
2598 spec->spec[spec->len].idx = access_idx;
2599 spec->len++;
2600
2601 sz = btf__resolve_size(btf, id);
2602 if (sz < 0)
2603 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002604 spec->bit_offset += access_idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002605 } else {
Kefeng Wangbe180102019-10-21 13:55:32 +08002606 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
2607 type_id, spec_str, i, id, btf_kind(t));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002608 return -EINVAL;
2609 }
2610 }
2611
2612 return 0;
2613}
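/* Worked example, using struct sample from the comment above with
 * spec_str "0:1:2:3":
 *	raw_spec = [0, 1, 2, 3], raw_len = 4
 *	high-level spec (len = 3): initial deref (idx 0), field 'a', index 3
 *	bit_offset = 0 + 32 (anon struct) + 64 (field 'a') + 3 * 32 (a[3])
 *	           = 192 bits, i.e. byte offset 24
 */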
2614
2615static bool bpf_core_is_flavor_sep(const char *s)
2616{
2617 /* check X___Y name pattern, where X and Y are not underscores */
2618 return s[0] != '_' && /* X */
2619 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
2620 s[4] != '_'; /* Y */
2621}
2622
2623/* Given 'some_struct_name___with_flavor' return the length of a name prefix
2624 * before last triple underscore. Struct name part after last triple
2625 * underscore is ignored by BPF CO-RE relocation during relocation matching.
2626 */
2627static size_t bpf_core_essential_name_len(const char *name)
2628{
2629 size_t n = strlen(name);
2630 int i;
2631
2632 for (i = n - 5; i >= 0; i--) {
2633 if (bpf_core_is_flavor_sep(name + i))
2634 return i + 1;
2635 }
2636 return n;
2637}
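/* Example: bpf_core_essential_name_len("task_struct___flavor") == 11,
 * so only the "task_struct" prefix participates in candidate matching;
 * names without a "___" separator keep their full length.
 */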
2638
2639/* dynamically sized list of type IDs */
2640struct ids_vec {
2641 __u32 *data;
2642 int len;
2643};
2644
2645static void bpf_core_free_cands(struct ids_vec *cand_ids)
2646{
2647 free(cand_ids->data);
2648 free(cand_ids);
2649}
2650
2651static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
2652 __u32 local_type_id,
2653 const struct btf *targ_btf)
2654{
2655 size_t local_essent_len, targ_essent_len;
2656 const char *local_name, *targ_name;
2657 const struct btf_type *t;
2658 struct ids_vec *cand_ids;
2659 __u32 *new_ids;
2660 int i, err, n;
2661
2662 t = btf__type_by_id(local_btf, local_type_id);
2663 if (!t)
2664 return ERR_PTR(-EINVAL);
2665
2666 local_name = btf__name_by_offset(local_btf, t->name_off);
2667 if (str_is_empty(local_name))
2668 return ERR_PTR(-EINVAL);
2669 local_essent_len = bpf_core_essential_name_len(local_name);
2670
2671 cand_ids = calloc(1, sizeof(*cand_ids));
2672 if (!cand_ids)
2673 return ERR_PTR(-ENOMEM);
2674
2675 n = btf__get_nr_types(targ_btf);
2676 for (i = 1; i <= n; i++) {
2677 t = btf__type_by_id(targ_btf, i);
2678 targ_name = btf__name_by_offset(targ_btf, t->name_off);
2679 if (str_is_empty(targ_name))
2680 continue;
2681
2682 targ_essent_len = bpf_core_essential_name_len(targ_name);
2683 if (targ_essent_len != local_essent_len)
2684 continue;
2685
2686 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
2687 pr_debug("[%d] %s: found candidate [%d] %s\n",
2688 local_type_id, local_name, i, targ_name);
 2689 new_ids = reallocarray(cand_ids->data, cand_ids->len + 1, sizeof(*new_ids));
2690 if (!new_ids) {
2691 err = -ENOMEM;
2692 goto err_out;
2693 }
2694 cand_ids->data = new_ids;
2695 cand_ids->data[cand_ids->len++] = i;
2696 }
2697 }
2698 return cand_ids;
2699err_out:
2700 bpf_core_free_cands(cand_ids);
2701 return ERR_PTR(err);
2702}
2703
2704/* Check two types for compatibility, skipping const/volatile/restrict and
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002705 * typedefs, to ensure we are relocating compatible entities:
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002706 * - any two STRUCTs/UNIONs are compatible and can be mixed;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07002707 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002708 * - any two PTRs are always compatible;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07002709 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
2710 * least one of enums should be anonymous;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002711 * - for ENUMs, check sizes, names are ignored;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002712 * - for INT, size and signedness are ignored;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002713 * - for ARRAY, dimensionality is ignored, element types are checked for
2714 * compatibility recursively;
2715 * - everything else shouldn't be ever a target of relocation.
2716 * These rules are not set in stone and probably will be adjusted as we get
2717 * more experience with using BPF CO-RE relocations.
2718 */
2719static int bpf_core_fields_are_compat(const struct btf *local_btf,
2720 __u32 local_id,
2721 const struct btf *targ_btf,
2722 __u32 targ_id)
2723{
2724 const struct btf_type *local_type, *targ_type;
2725
2726recur:
2727 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
2728 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2729 if (!local_type || !targ_type)
2730 return -EINVAL;
2731
2732 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
2733 return 1;
2734 if (btf_kind(local_type) != btf_kind(targ_type))
2735 return 0;
2736
2737 switch (btf_kind(local_type)) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002738 case BTF_KIND_PTR:
2739 return 1;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07002740 case BTF_KIND_FWD:
2741 case BTF_KIND_ENUM: {
2742 const char *local_name, *targ_name;
2743 size_t local_len, targ_len;
2744
2745 local_name = btf__name_by_offset(local_btf,
2746 local_type->name_off);
2747 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
2748 local_len = bpf_core_essential_name_len(local_name);
2749 targ_len = bpf_core_essential_name_len(targ_name);
2750 /* one of them is anonymous or both w/ same flavor-less names */
2751 return local_len == 0 || targ_len == 0 ||
2752 (local_len == targ_len &&
2753 strncmp(local_name, targ_name, local_len) == 0);
2754 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002755 case BTF_KIND_INT:
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002756 /* just reject deprecated bitfield-like integers; all other
2757 * integers are by default compatible with each other
2758 */
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002759 return btf_int_offset(local_type) == 0 &&
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002760 btf_int_offset(targ_type) == 0;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002761 case BTF_KIND_ARRAY:
2762 local_id = btf_array(local_type)->type;
2763 targ_id = btf_array(targ_type)->type;
2764 goto recur;
2765 default:
Kefeng Wangbe180102019-10-21 13:55:32 +08002766 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
2767 btf_kind(local_type), local_id, targ_id);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002768 return 0;
2769 }
2770}
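
/* Editor's note -- a hedged example of the rules above (types hypothetical):
 * a local member declared as "const int refcnt" is compatible with a target
 * member declared as "volatile unsigned long refcnt", because modifiers are
 * skipped and INT size/signedness are ignored; it is not compatible with a
 * pointer-typed target member, because the BTF kinds differ.
 */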
2771
2772/*
2773 * Given single high-level named field accessor in local type, find
2774 * corresponding high-level accessor for a target type. Along the way,
2775 * maintain low-level spec for target as well. Also keep updating target
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002776 * bit offset.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002777 *
2778 * Searching is performed through recursive exhaustive enumeration of all
2779 * fields of a struct/union. If there are any anonymous (embedded)
2780 * structs/unions, they are recursively searched as well. If field with
2781 * structs/unions, they are recursively searched as well. If a field with
2782 * the desired name is found, local and target types are checked for
2783 * compatibility before returning the result.
2784 * 1 is returned if the field is found.
2785 * 0 is returned if no compatible field is found.
2786 * <0 is returned on error.
2787 */
2788static int bpf_core_match_member(const struct btf *local_btf,
2789 const struct bpf_core_accessor *local_acc,
2790 const struct btf *targ_btf,
2791 __u32 targ_id,
2792 struct bpf_core_spec *spec,
2793 __u32 *next_targ_id)
2794{
2795 const struct btf_type *local_type, *targ_type;
2796 const struct btf_member *local_member, *m;
2797 const char *local_name, *targ_name;
2798 __u32 local_id;
2799 int i, n, found;
2800
2801 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2802 if (!targ_type)
2803 return -EINVAL;
2804 if (!btf_is_composite(targ_type))
2805 return 0;
2806
2807 local_id = local_acc->type_id;
2808 local_type = btf__type_by_id(local_btf, local_id);
2809 local_member = btf_members(local_type) + local_acc->idx;
2810 local_name = btf__name_by_offset(local_btf, local_member->name_off);
2811
2812 n = btf_vlen(targ_type);
2813 m = btf_members(targ_type);
2814 for (i = 0; i < n; i++, m++) {
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002815 __u32 bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002816
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002817 bit_offset = btf_member_bit_offset(targ_type, i);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002818
2819 /* too deep struct/union/array nesting */
2820 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2821 return -E2BIG;
2822
2823 /* speculate this member will be the good one */
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002824 spec->bit_offset += bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002825 spec->raw_spec[spec->raw_len++] = i;
2826
2827 targ_name = btf__name_by_offset(targ_btf, m->name_off);
2828 if (str_is_empty(targ_name)) {
2829 /* embedded struct/union, we need to go deeper */
2830 found = bpf_core_match_member(local_btf, local_acc,
2831 targ_btf, m->type,
2832 spec, next_targ_id);
2833 if (found) /* either found or error */
2834 return found;
2835 } else if (strcmp(local_name, targ_name) == 0) {
2836 /* matching named field */
2837 struct bpf_core_accessor *targ_acc;
2838
2839 targ_acc = &spec->spec[spec->len++];
2840 targ_acc->type_id = targ_id;
2841 targ_acc->idx = i;
2842 targ_acc->name = targ_name;
2843
2844 *next_targ_id = m->type;
2845 found = bpf_core_fields_are_compat(local_btf,
2846 local_member->type,
2847 targ_btf, m->type);
2848 if (!found)
2849 spec->len--; /* pop accessor */
2850 return found;
2851 }
2852 /* member turned out not to be what we looked for */
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002853 spec->bit_offset -= bit_offset;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002854 spec->raw_len--;
2855 }
2856
2857 return 0;
2858}
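
/* Editor's note -- an illustrative sketch, not part of libbpf: given a
 * hypothetical target type
 *
 *	struct target { struct { int x; }; int y; };
 *
 * a local accessor naming "x" is matched by recursing into the embedded
 * anonymous struct; the anonymous member contributes a raw spec step, but
 * only the named field "x" becomes a high-level accessor.
 */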
2859
2860/*
2861 * Try to match local spec to a target type and, if successful, produce full
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002862 * target spec (high-level, low-level + bit offset).
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002863 */
2864static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
2865 const struct btf *targ_btf, __u32 targ_id,
2866 struct bpf_core_spec *targ_spec)
2867{
2868 const struct btf_type *targ_type;
2869 const struct bpf_core_accessor *local_acc;
2870 struct bpf_core_accessor *targ_acc;
2871 int i, sz, matched;
2872
2873 memset(targ_spec, 0, sizeof(*targ_spec));
2874 targ_spec->btf = targ_btf;
2875
2876 local_acc = &local_spec->spec[0];
2877 targ_acc = &targ_spec->spec[0];
2878
2879 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
2880 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
2881 &targ_id);
2882 if (!targ_type)
2883 return -EINVAL;
2884
2885 if (local_acc->name) {
2886 matched = bpf_core_match_member(local_spec->btf,
2887 local_acc,
2888 targ_btf, targ_id,
2889 targ_spec, &targ_id);
2890 if (matched <= 0)
2891 return matched;
2892 } else {
2893 /* for i=0, targ_id is already treated as array element
2894 * type (because it's the original struct), for others
2895 * we should find array element type first
2896 */
2897 if (i > 0) {
2898 const struct btf_array *a;
2899
2900 if (!btf_is_array(targ_type))
2901 return 0;
2902
2903 a = btf_array(targ_type);
2904 if (local_acc->idx >= a->nelems)
2905 return 0;
2906 if (!skip_mods_and_typedefs(targ_btf, a->type,
2907 &targ_id))
2908 return -EINVAL;
2909 }
2910
2911 /* too deep struct/union/array nesting */
2912 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2913 return -E2BIG;
2914
2915 targ_acc->type_id = targ_id;
2916 targ_acc->idx = local_acc->idx;
2917 targ_acc->name = NULL;
2918 targ_spec->len++;
2919 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
2920 targ_spec->raw_len++;
2921
2922 sz = btf__resolve_size(targ_btf, targ_id);
2923 if (sz < 0)
2924 return sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002925 targ_spec->bit_offset += local_acc->idx * sz * 8;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07002926 }
2927 }
2928
2929 return 1;
2930}
2931
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002932static int bpf_core_calc_field_relo(const struct bpf_program *prog,
2933 const struct bpf_field_reloc *relo,
2934 const struct bpf_core_spec *spec,
2935 __u32 *val, bool *validate)
2936{
2937 const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
2938 const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
2939 __u32 byte_off, byte_sz, bit_off, bit_sz;
2940 const struct btf_member *m;
2941 const struct btf_type *mt;
2942 bool bitfield;
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07002943 __s64 sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002944
2945 /* a[n] accessor needs special handling */
2946 if (!acc->name) {
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07002947 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
2948 *val = spec->bit_offset / 8;
2949 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
2950 sz = btf__resolve_size(spec->btf, acc->type_id);
2951 if (sz < 0)
2952 return -EINVAL;
2953 *val = sz;
2954 } else {
2955 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002956 bpf_program__title(prog, false),
2957 relo->kind, relo->insn_off / 8);
2958 return -EINVAL;
2959 }
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002960 if (validate)
2961 *validate = true;
2962 return 0;
2963 }
2964
2965 m = btf_members(t) + acc->idx;
2966 mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
2967 bit_off = spec->bit_offset;
2968 bit_sz = btf_member_bitfield_size(t, acc->idx);
2969
2970 bitfield = bit_sz > 0;
2971 if (bitfield) {
2972 byte_sz = mt->size;
2973 byte_off = bit_off / 8 / byte_sz * byte_sz;
2974 /* figure out smallest int size necessary for bitfield load */
2975 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
2976 if (byte_sz >= 8) {
2977 /* bitfield can't be read with 64-bit read */
2978 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
2979 bpf_program__title(prog, false),
2980 relo->kind, relo->insn_off / 8);
2981 return -E2BIG;
2982 }
2983 byte_sz *= 2;
2984 byte_off = bit_off / 8 / byte_sz * byte_sz;
2985 }
2986 } else {
Andrii Nakryiko94f060e2019-11-01 15:28:08 -07002987 sz = btf__resolve_size(spec->btf, m->type);
2988 if (sz < 0)
2989 return -EINVAL;
2990 byte_sz = sz;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07002991 byte_off = spec->bit_offset / 8;
2992 bit_sz = byte_sz * 8;
2993 }
2994
2995 /* for bitfields, all the relocatable aspects are ambiguous and we
2996 * might disagree with compiler, so turn off validation of expected
2997 * value, except for signedness
2998 */
2999 if (validate)
3000 *validate = !bitfield;
3001
3002 switch (relo->kind) {
3003 case BPF_FIELD_BYTE_OFFSET:
3004 *val = byte_off;
3005 break;
3006 case BPF_FIELD_BYTE_SIZE:
3007 *val = byte_sz;
3008 break;
3009 case BPF_FIELD_SIGNED:
3010 /* enums will be assumed unsigned */
3011 *val = btf_is_enum(mt) ||
3012 (btf_int_encoding(mt) & BTF_INT_SIGNED);
3013 if (validate)
3014 *validate = true; /* signedness is never ambiguous */
3015 break;
3016 case BPF_FIELD_LSHIFT_U64:
3017#if __BYTE_ORDER == __LITTLE_ENDIAN
3018 *val = 64 - (bit_off + bit_sz - byte_off * 8);
3019#else
3020 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
3021#endif
3022 break;
3023 case BPF_FIELD_RSHIFT_U64:
3024 *val = 64 - bit_sz;
3025 if (validate)
3026 *validate = true; /* right shift is never ambiguous */
3027 break;
3028 case BPF_FIELD_EXISTS:
3029 default:
3030 pr_warn("prog '%s': unknown relo %d at insn #%d\n",
3031 bpf_program__title(prog, false),
3032 relo->kind, relo->insn_off / 8);
3033 return -EINVAL;
3034 }
3035
3036 return 0;
3037}
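
/* Editor's illustrative sketch, not part of libbpf: how the values computed
 * above are meant to be combined to read a bitfield. Assume a hypothetical
 * 5-bit unsigned bitfield starting at bit offset 18 within a 4-byte int; the
 * logic above then yields byte_off = 0, byte_sz = 4, LSHIFT_U64 = 41 and
 * RSHIFT_U64 = 59 on a little-endian host.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
static inline __u64 example_read_bitfield(const void *base)
{
	__u64 val = 0;

	/* unsigned load of byte_sz (4) bytes at byte_off (0) */
	memcpy(&val, (const char *)base + 0, 4);
	val <<= 41;	/* BPF_FIELD_LSHIFT_U64: drop bits above the field */
	val >>= 59;	/* BPF_FIELD_RSHIFT_U64: drop bits below the field */
	return val;
}
#endif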
3038
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003039/*
3040 * Patch relocatable BPF instruction.
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003041 *
3042 * Patched value is determined by relocation kind and target specification.
3043 * For field existence relocation target spec will be NULL if field is not
3044 * found.
3045 * Expected insn->imm value is determined using relocation kind and local
3046 * spec, and is checked before patching instruction. If actual insn->imm value
3047 * is wrong, bail out with error.
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003048 *
3049 * Currently two kinds of BPF instructions are supported:
3050 * 1. rX = <imm> (assignment with immediate operand);
3051 * 2. rX += <imm> (arithmetic operations with immediate operand);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003052 */
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003053static int bpf_core_reloc_insn(struct bpf_program *prog,
3054 const struct bpf_field_reloc *relo,
3055 const struct bpf_core_spec *local_spec,
3056 const struct bpf_core_spec *targ_spec)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003057{
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003058 bool failed = false, validate = true;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003059 __u32 orig_val, new_val;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003060 struct bpf_insn *insn;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003061 int insn_idx, err;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003062 __u8 class;
3063
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003064 if (relo->insn_off % sizeof(struct bpf_insn))
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003065 return -EINVAL;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003066 insn_idx = relo->insn_off / sizeof(struct bpf_insn);
3067
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003068 if (relo->kind == BPF_FIELD_EXISTS) {
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003069 orig_val = 1; /* can't generate EXISTS relo w/o local field */
3070 new_val = targ_spec ? 1 : 0;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003071 } else if (!targ_spec) {
3072 failed = true;
3073 new_val = (__u32)-1;
3074 } else {
3075 err = bpf_core_calc_field_relo(prog, relo, local_spec,
3076 &orig_val, &validate);
3077 if (err)
3078 return err;
3079 err = bpf_core_calc_field_relo(prog, relo, targ_spec,
3080 &new_val, NULL);
3081 if (err)
3082 return err;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003083 }
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003084
3085 insn = &prog->insns[insn_idx];
3086 class = BPF_CLASS(insn->code);
3087
3088 if (class == BPF_ALU || class == BPF_ALU64) {
3089 if (BPF_SRC(insn->code) != BPF_K)
3090 return -EINVAL;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003091 if (!failed && validate && insn->imm != orig_val) {
3092 pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
3093 bpf_program__title(prog, false), insn_idx,
3094 insn->imm, orig_val, new_val);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003095 return -EINVAL;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003096 }
3097 orig_val = insn->imm;
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003098 insn->imm = new_val;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003099 pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
3100 bpf_program__title(prog, false), insn_idx,
3101 failed ? " w/ failed reloc" : "", orig_val, new_val);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003102 } else {
Kefeng Wangbe180102019-10-21 13:55:32 +08003103 pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
3104 bpf_program__title(prog, false),
3105 insn_idx, insn->code, insn->src_reg, insn->dst_reg,
3106 insn->off, insn->imm);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003107 return -EINVAL;
3108 }
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003109
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003110 return 0;
3111}
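
/* Editor's note -- a hedged example of the patching above (offsets are
 * hypothetical). For a BPF_FIELD_BYTE_OFFSET relocation the compiler emits
 * the field offset of the local (compile-time) layout as an immediate, e.g.:
 *
 *	r1 = 40;	// offset in local struct layout
 *	r1 += r6;	// r6 holds the struct pointer
 *
 * and after relocation the immediate is rewritten to the offset in the
 * target kernel's layout, e.g. "r1 = 48;". The "rX += <imm>" form is
 * patched the same way.
 */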
3112
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003113static struct btf *btf_load_raw(const char *path)
3114{
3115 struct btf *btf;
3116 size_t read_cnt;
3117 struct stat st;
3118 void *data;
3119 FILE *f;
3120
3121 if (stat(path, &st))
3122 return ERR_PTR(-errno);
3123
3124 data = malloc(st.st_size);
3125 if (!data)
3126 return ERR_PTR(-ENOMEM);
3127
3128 f = fopen(path, "rb");
3129 if (!f) {
3130 btf = ERR_PTR(-errno);
3131 goto cleanup;
3132 }
3133
3134 read_cnt = fread(data, 1, st.st_size, f);
3135 fclose(f);
3136 if (read_cnt < st.st_size) {
3137 btf = ERR_PTR(-EBADF);
3138 goto cleanup;
3139 }
3140
3141 btf = btf__new(data, read_cnt);
3142
3143cleanup:
3144 free(data);
3145 return btf;
3146}
3147
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003148/*
3149 * Probe few well-known locations for vmlinux kernel image and try to load BTF
3150 * data out of it to use for target BTF.
3151 */
3152static struct btf *bpf_core_find_kernel_btf(void)
3153{
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003154 struct {
3155 const char *path_fmt;
3156 bool raw_btf;
3157 } locations[] = {
3158 /* try canonical vmlinux BTF through sysfs first */
3159 { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
3160 /* fall back to trying to find vmlinux ELF on disk otherwise */
3161 { "/boot/vmlinux-%1$s" },
3162 { "/lib/modules/%1$s/vmlinux-%1$s" },
3163 { "/lib/modules/%1$s/build/vmlinux" },
3164 { "/usr/lib/modules/%1$s/kernel/vmlinux" },
3165 { "/usr/lib/debug/boot/vmlinux-%1$s" },
3166 { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
3167 { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003168 };
3169 char path[PATH_MAX + 1];
3170 struct utsname buf;
3171 struct btf *btf;
3172 int i;
3173
3174 uname(&buf);
3175
3176 for (i = 0; i < ARRAY_SIZE(locations); i++) {
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003177 snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003178
3179 if (access(path, R_OK))
3180 continue;
3181
Andrii Nakryikoa1916a12019-08-13 11:54:43 -07003182 if (locations[i].raw_btf)
3183 btf = btf_load_raw(path);
3184 else
3185 btf = btf__parse_elf(path, NULL);
3186
3187 pr_debug("loading kernel BTF '%s': %ld\n",
3188 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003189 if (IS_ERR(btf))
3190 continue;
3191
3192 return btf;
3193 }
3194
Kefeng Wangbe180102019-10-21 13:55:32 +08003195 pr_warn("failed to find valid kernel BTF\n");
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003196 return ERR_PTR(-ESRCH);
3197}
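
/* Editor's note -- an illustrative sketch, not part of libbpf: callers that
 * cannot rely on the probing above may point libbpf at target BTF explicitly
 * via the load attributes. The path below is hypothetical and, with this
 * code, must be an ELF image carrying a .BTF section (it is parsed with
 * btf__parse_elf()):
 *
 *	struct bpf_object_load_attr attr = {
 *		.obj = obj,
 *		.target_btf_path = "/path/to/vmlinux",
 *	};
 *	err = bpf_object__load_xattr(&attr);
 */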
3198
3199/* Output spec definition in the format:
3200 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
3201 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
3202 */
3203static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
3204{
3205 const struct btf_type *t;
3206 const char *s;
3207 __u32 type_id;
3208 int i;
3209
3210 type_id = spec->spec[0].type_id;
3211 t = btf__type_by_id(spec->btf, type_id);
3212 s = btf__name_by_offset(spec->btf, t->name_off);
3213 libbpf_print(level, "[%u] %s + ", type_id, s);
3214
3215 for (i = 0; i < spec->raw_len; i++)
3216 libbpf_print(level, "%d%s", spec->raw_spec[i],
3217 i == spec->raw_len - 1 ? " => " : ":");
3218
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003219 libbpf_print(level, "%u.%u @ &x",
3220 spec->bit_offset / 8, spec->bit_offset % 8);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003221
3222 for (i = 0; i < spec->len; i++) {
3223 if (spec->spec[i].name)
3224 libbpf_print(level, ".%s", spec->spec[i].name);
3225 else
3226 libbpf_print(level, "[%u]", spec->spec[i].idx);
3227 }
3228
3229}
3230
3231static size_t bpf_core_hash_fn(const void *key, void *ctx)
3232{
3233 return (size_t)key;
3234}
3235
3236static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
3237{
3238 return k1 == k2;
3239}
3240
3241static void *u32_as_hash_key(__u32 x)
3242{
3243 return (void *)(uintptr_t)x;
3244}
3245
3246/*
3247 * CO-RE relocate single instruction.
3248 *
3249 * The outline and important points of the algorithm:
3250 * 1. For given local type, find corresponding candidate target types.
3251 * Candidate type is a type with the same "essential" name, ignoring
3252 * everything after last triple underscore (___). E.g., `sample`,
3253 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
3254 * for each other. Names with triple underscore are referred to as
3255 * "flavors" and are useful, among other things, for specifying and
3256 * supporting incompatible variations of the same kernel struct, which
3257 * might differ between different kernel versions and/or build
3258 * configurations.
3259 *
3260 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
3261 * converter, when deduplicated BTF of a kernel still contains more than
3262 * one distinct type with the same name. In that case, ___2, ___3, etc
3263 * are appended starting from the second name conflict. But struct flavors
3264 * are also useful when defined "locally", in a BPF program, to extract the
3265 * same data from incompatible changes between different kernel
3266 * versions/configurations. For instance, to handle field renames between
3267 * kernel versions, one can use two flavors of the struct name with the
3268 * same common name and use conditional relocations to extract that field,
3269 * depending on target kernel version (see the sketch after this function).
3270 * 2. For each candidate type, try to match local specification to this
3271 * candidate target type. Matching involves finding corresponding
3272 * high-level spec accessors, meaning that all named fields should match,
3273 * as well as all array accesses should be within the actual bounds. Also,
3274 * types should be compatible (see bpf_core_fields_are_compat for details).
3275 * 3. It is supported and expected that there might be multiple flavors
3276 * matching the spec. As long as all the specs resolve to the same set of
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003277 * offsets across all candidates, there is no error. If there is any
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003278 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
3279 * imperfection of BTF deduplication, which can cause slight duplication of
3280 * the same BTF type, if some directly or indirectly referenced (by
3281 * pointer) type gets resolved to different actual types in different
3282 * object files. If such situation occurs, deduplicated BTF will end up
3283 * with two (or more) structurally identical types, which differ only in
3284 * types they refer to through pointer. This should be OK in most cases and
3285 * is not an error.
3286 * 4. Candidate types search is performed by linearly scanning through all
3287 * types in target BTF. It is anticipated that this is overall more
3288 * efficient memory-wise and not significantly worse (if not better)
3289 * CPU-wise compared to prebuilding a map from all local type names to
3290 * a list of candidate type names. It's also sped up by caching resolved
3291 * list of matching candidates for each local "root" type ID that has at
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003292 * least one bpf_field_reloc associated with it. This list is shared
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003293 * between multiple relocations for the same type ID and is updated as some
3294 * of the candidates are pruned due to structural incompatibility.
3295 */
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003296static int bpf_core_reloc_field(struct bpf_program *prog,
3297 const struct bpf_field_reloc *relo,
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003298 int relo_idx,
3299 const struct btf *local_btf,
3300 const struct btf *targ_btf,
3301 struct hashmap *cand_cache)
3302{
3303 const char *prog_name = bpf_program__title(prog, false);
3304 struct bpf_core_spec local_spec, cand_spec, targ_spec;
3305 const void *type_key = u32_as_hash_key(relo->type_id);
3306 const struct btf_type *local_type, *cand_type;
3307 const char *local_name, *cand_name;
3308 struct ids_vec *cand_ids;
3309 __u32 local_id, cand_id;
3310 const char *spec_str;
3311 int i, j, err;
3312
3313 local_id = relo->type_id;
3314 local_type = btf__type_by_id(local_btf, local_id);
3315 if (!local_type)
3316 return -EINVAL;
3317
3318 local_name = btf__name_by_offset(local_btf, local_type->name_off);
3319 if (str_is_empty(local_name))
3320 return -EINVAL;
3321
3322 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
3323 if (str_is_empty(spec_str))
3324 return -EINVAL;
3325
3326 err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
3327 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003328 pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
3329 prog_name, relo_idx, local_id, local_name, spec_str,
3330 err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003331 return -EINVAL;
3332 }
3333
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003334 pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
3335 relo->kind);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003336 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
3337 libbpf_print(LIBBPF_DEBUG, "\n");
3338
3339 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
3340 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
3341 if (IS_ERR(cand_ids)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003342 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
3343 prog_name, relo_idx, local_id, local_name,
3344 PTR_ERR(cand_ids));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003345 return PTR_ERR(cand_ids);
3346 }
3347 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
3348 if (err) {
3349 bpf_core_free_cands(cand_ids);
3350 return err;
3351 }
3352 }
3353
3354 for (i = 0, j = 0; i < cand_ids->len; i++) {
3355 cand_id = cand_ids->data[i];
3356 cand_type = btf__type_by_id(targ_btf, cand_id);
3357 cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
3358
3359 err = bpf_core_spec_match(&local_spec, targ_btf,
3360 cand_id, &cand_spec);
3361 pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
3362 prog_name, relo_idx, i, cand_name);
3363 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
3364 libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
3365 if (err < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003366 pr_warn("prog '%s': relo #%d: matching error: %d\n",
3367 prog_name, relo_idx, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003368 return err;
3369 }
3370 if (err == 0)
3371 continue;
3372
3373 if (j == 0) {
3374 targ_spec = cand_spec;
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003375 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003376 /* if there are many candidates, they should all
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003377 * resolve to the same bit offset
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003378 */
Kefeng Wangbe180102019-10-21 13:55:32 +08003379 pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
Andrii Nakryikoee26dad2019-11-01 15:28:07 -07003380 prog_name, relo_idx, cand_spec.bit_offset,
3381 targ_spec.bit_offset);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003382 return -EINVAL;
3383 }
3384
3385 cand_ids->data[j++] = cand_spec.spec[0].type_id;
3386 }
3387
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003388 /*
3389 * For BPF_FIELD_EXISTS relo or when relaxed CO-RE reloc mode is
3390 * requested, it's expected that we might not find any candidates.
3391 * In this case, if field wasn't found in any candidate, the list of
3392 * candidates shouldn't change at all; we'll just handle the relocation
3393 * appropriately, depending on relo's kind.
3394 */
3395 if (j > 0)
3396 cand_ids->len = j;
3397
3398 if (j == 0 && !prog->obj->relaxed_core_relocs &&
3399 relo->kind != BPF_FIELD_EXISTS) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003400 pr_warn("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
3401 prog_name, relo_idx, local_id, local_name, spec_str);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003402 return -ESRCH;
3403 }
3404
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003405 /* bpf_core_reloc_insn should know how to handle missing targ_spec */
3406 err = bpf_core_reloc_insn(prog, relo, &local_spec,
3407 j ? &targ_spec : NULL);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003408 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003409 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
3410 prog_name, relo_idx, relo->insn_off, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003411 return -EINVAL;
3412 }
3413
3414 return 0;
3415}
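
/* Editor's note -- a hedged sketch of the flavor-based approach described in
 * the comment above, as it might look on the BPF program side. All type and
 * field names are hypothetical, and bpf_core_field_exists()/BPF_CORE_READ()
 * are assumed to be provided by bpf_core_read.h:
 *
 *	struct sample___old { int cnt; };
 *	struct sample___new { int counter; };
 *
 *	if (bpf_core_field_exists(((struct sample___new *)s)->counter))
 *		cnt = BPF_CORE_READ((struct sample___new *)s, counter);
 *	else
 *		cnt = BPF_CORE_READ((struct sample___old *)s, cnt);
 */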
3416
3417static int
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003418bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003419{
3420 const struct btf_ext_info_sec *sec;
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003421 const struct bpf_field_reloc *rec;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003422 const struct btf_ext_info *seg;
3423 struct hashmap_entry *entry;
3424 struct hashmap *cand_cache = NULL;
3425 struct bpf_program *prog;
3426 struct btf *targ_btf;
3427 const char *sec_name;
3428 int i, err = 0;
3429
3430 if (targ_btf_path)
3431 targ_btf = btf__parse_elf(targ_btf_path, NULL);
3432 else
3433 targ_btf = bpf_core_find_kernel_btf();
3434 if (IS_ERR(targ_btf)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003435 pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003436 return PTR_ERR(targ_btf);
3437 }
3438
3439 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
3440 if (IS_ERR(cand_cache)) {
3441 err = PTR_ERR(cand_cache);
3442 goto out;
3443 }
3444
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003445 seg = &obj->btf_ext->field_reloc_info;
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003446 for_each_btf_ext_sec(seg, sec) {
3447 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3448 if (str_is_empty(sec_name)) {
3449 err = -EINVAL;
3450 goto out;
3451 }
3452 prog = bpf_object__find_program_by_title(obj, sec_name);
3453 if (!prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003454 pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
3455 sec_name);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003456 err = -EINVAL;
3457 goto out;
3458 }
3459
3460 pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
3461 sec_name, sec->num_info);
3462
3463 for_each_btf_ext_rec(seg, sec, i, rec) {
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003464 err = bpf_core_reloc_field(prog, rec, i, obj->btf,
3465 targ_btf, cand_cache);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003466 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003467 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
3468 sec_name, i, err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003469 goto out;
3470 }
3471 }
3472 }
3473
3474out:
3475 btf__free(targ_btf);
3476 if (!IS_ERR_OR_NULL(cand_cache)) {
3477 hashmap__for_each_entry(cand_cache, entry, i) {
3478 bpf_core_free_cands(entry->value);
3479 }
3480 hashmap__free(cand_cache);
3481 }
3482 return err;
3483}
3484
3485static int
3486bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
3487{
3488 int err = 0;
3489
Andrii Nakryiko511bb002019-10-15 11:28:45 -07003490 if (obj->btf_ext->field_reloc_info.len)
3491 err = bpf_core_reloc_fields(obj, targ_btf_path);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003492
3493 return err;
3494}
3495
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003496static int
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003497bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
3498 struct reloc_desc *relo)
3499{
3500 struct bpf_insn *insn, *new_insn;
3501 struct bpf_program *text;
3502 size_t new_cnt;
Yonghong Song2993e052018-11-19 15:29:16 -08003503 int err;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003504
3505 if (relo->type != RELO_CALL)
3506 return -LIBBPF_ERRNO__RELOC;
3507
3508 if (prog->idx == obj->efile.text_shndx) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003509 pr_warn("relo in .text insn %d into off %d\n",
3510 relo->insn_idx, relo->text_off);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003511 return -LIBBPF_ERRNO__RELOC;
3512 }
3513
3514 if (prog->main_prog_cnt == 0) {
3515 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
3516 if (!text) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003517 pr_warn("no .text section found yet relo into text exist\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003518 return -LIBBPF_ERRNO__RELOC;
3519 }
3520 new_cnt = prog->insns_cnt + text->insns_cnt;
Jakub Kicinski531b0142018-07-10 14:43:05 -07003521 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003522 if (!new_insn) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003523 pr_warn("oom in prog realloc\n");
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003524 return -ENOMEM;
3525 }
Yonghong Song2993e052018-11-19 15:29:16 -08003526
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003527 if (obj->btf_ext) {
3528 err = bpf_program_reloc_btf_ext(prog, obj,
3529 text->section_name,
3530 prog->insns_cnt);
3531 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08003532 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08003533 }
3534
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003535 memcpy(new_insn + prog->insns_cnt, text->insns,
3536 text->insns_cnt * sizeof(*insn));
3537 prog->insns = new_insn;
3538 prog->main_prog_cnt = prog->insns_cnt;
3539 prog->insns_cnt = new_cnt;
Jeremy Clineb1a2ce82018-02-20 01:00:07 +00003540 pr_debug("added %zd insn from %s to prog %s\n",
3541 text->insns_cnt, text->section_name,
3542 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003543 }
3544 insn = &prog->insns[relo->insn_idx];
3545 insn->imm += prog->main_prog_cnt - relo->insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003546 return 0;
3547}
3548
3549static int
Wang Nan9d759a92015-11-27 08:47:35 +00003550bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00003551{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003552 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003553
Yonghong Song2993e052018-11-19 15:29:16 -08003554 if (!prog)
3555 return 0;
3556
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003557 if (obj->btf_ext) {
3558 err = bpf_program_reloc_btf_ext(prog, obj,
3559 prog->section_name, 0);
3560 if (err)
Yonghong Song2993e052018-11-19 15:29:16 -08003561 return err;
Yonghong Song2993e052018-11-19 15:29:16 -08003562 }
3563
3564 if (!prog->reloc_desc)
Wang Nan8a47a6c2015-07-01 02:14:05 +00003565 return 0;
3566
3567 for (i = 0; i < prog->nr_reloc; i++) {
Daniel Borkmannd8599002019-04-09 23:20:13 +02003568 if (prog->reloc_desc[i].type == RELO_LD64 ||
3569 prog->reloc_desc[i].type == RELO_DATA) {
3570 bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003571 struct bpf_insn *insns = prog->insns;
3572 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003573
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003574 insn_idx = prog->reloc_desc[i].insn_idx;
3575 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003576
Daniel Borkmannd8599002019-04-09 23:20:13 +02003577 if (insn_idx + 1 >= (int)prog->insns_cnt) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003578 pr_warn("relocation out of range: '%s'\n",
3579 prog->section_name);
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003580 return -LIBBPF_ERRNO__RELOC;
3581 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02003582
3583 if (!relo_data) {
3584 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
3585 } else {
3586 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
3587 insns[insn_idx + 1].imm = insns[insn_idx].imm;
3588 }
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003589 insns[insn_idx].imm = obj->maps[map_idx].fd;
Joe Stringerf8c7a4d2019-04-09 23:20:12 +02003590 } else if (prog->reloc_desc[i].type == RELO_CALL) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003591 err = bpf_program__reloc_text(prog, obj,
3592 &prog->reloc_desc[i]);
3593 if (err)
3594 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00003595 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00003596 }
3597
3598 zfree(&prog->reloc_desc);
3599 prog->nr_reloc = 0;
3600 return 0;
3601}
3602
Wang Nan8a47a6c2015-07-01 02:14:05 +00003603static int
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003604bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
Wang Nan8a47a6c2015-07-01 02:14:05 +00003605{
3606 struct bpf_program *prog;
3607 size_t i;
3608 int err;
3609
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003610 if (obj->btf_ext) {
3611 err = bpf_object__relocate_core(obj, targ_btf_path);
3612 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003613 pr_warn("failed to perform CO-RE relocations: %d\n",
3614 err);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07003615 return err;
3616 }
3617 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00003618 for (i = 0; i < obj->nr_programs; i++) {
3619 prog = &obj->programs[i];
3620
Wang Nan9d759a92015-11-27 08:47:35 +00003621 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00003622 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003623 pr_warn("failed to relocate '%s'\n", prog->section_name);
Wang Nan8a47a6c2015-07-01 02:14:05 +00003624 return err;
3625 }
3626 }
3627 return 0;
3628}
3629
Wang Nan34090912015-07-01 02:14:02 +00003630static int bpf_object__collect_reloc(struct bpf_object *obj)
3631{
3632 int i, err;
3633
3634 if (!obj_elf_valid(obj)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003635 pr_warn("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00003636 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00003637 }
3638
3639 for (i = 0; i < obj->efile.nr_reloc; i++) {
3640 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
3641 Elf_Data *data = obj->efile.reloc[i].data;
3642 int idx = shdr->sh_info;
3643 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00003644
3645 if (shdr->sh_type != SHT_REL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003646 pr_warn("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003647 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00003648 }
3649
3650 prog = bpf_object__find_prog_by_idx(obj, idx);
3651 if (!prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003652 pr_warn("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003653 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00003654 }
3655
Andrii Nakryiko399dc652019-05-29 10:36:11 -07003656 err = bpf_program__collect_reloc(prog, shdr, data, obj);
Wang Nan34090912015-07-01 02:14:02 +00003657 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00003658 return err;
Wang Nan34090912015-07-01 02:14:02 +00003659 }
3660 return 0;
3661}
3662
Wang Nan55cffde2015-07-01 02:14:07 +00003663static int
Yonghong Song2993e052018-11-19 15:29:16 -08003664load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003665 char *license, __u32 kern_version, int *pfd)
Wang Nan55cffde2015-07-01 02:14:07 +00003666{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003667 struct bpf_load_program_attr load_attr;
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02003668 char *cp, errmsg[STRERR_BUFSIZE];
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003669 int log_buf_size = BPF_LOG_BUF_SIZE;
Wang Nan55cffde2015-07-01 02:14:07 +00003670 char *log_buf;
Andrii Nakryiko5d01ab72019-07-26 14:24:38 -07003671 int btf_fd, ret;
Wang Nan55cffde2015-07-01 02:14:07 +00003672
Andrii Nakryikofba01a02019-05-29 10:36:08 -07003673 if (!insns || !insns_cnt)
3674 return -EINVAL;
3675
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003676 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
Yonghong Song2993e052018-11-19 15:29:16 -08003677 load_attr.prog_type = prog->type;
3678 load_attr.expected_attach_type = prog->expected_attach_type;
Stanislav Fomichev5b32a232018-11-20 17:11:21 -08003679 if (prog->caps->name)
3680 load_attr.name = prog->name;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003681 load_attr.insns = insns;
3682 load_attr.insns_cnt = insns_cnt;
3683 load_attr.license = license;
3684 load_attr.kern_version = kern_version;
Yonghong Song2993e052018-11-19 15:29:16 -08003685 load_attr.prog_ifindex = prog->prog_ifindex;
Andrii Nakryiko3415ec62019-08-01 00:24:05 -07003686 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
3687 if (prog->obj->btf_ext)
3688 btf_fd = bpf_object__btf_fd(prog->obj);
3689 else
3690 btf_fd = -1;
Andrii Nakryiko5d01ab72019-07-26 14:24:38 -07003691 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
Yonghong Song2993e052018-11-19 15:29:16 -08003692 load_attr.func_info = prog->func_info;
3693 load_attr.func_info_rec_size = prog->func_info_rec_size;
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003694 load_attr.func_info_cnt = prog->func_info_cnt;
Martin KaFai Lau3d650142018-12-07 16:42:31 -08003695 load_attr.line_info = prog->line_info;
3696 load_attr.line_info_rec_size = prog->line_info_rec_size;
3697 load_attr.line_info_cnt = prog->line_info_cnt;
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003698 load_attr.log_level = prog->log_level;
Jiong Wang04656192019-05-24 23:25:19 +01003699 load_attr.prog_flags = prog->prog_flags;
Alexei Starovoitov12a86542019-10-30 15:32:12 -07003700 load_attr.attach_btf_id = prog->attach_btf_id;
Wang Nan55cffde2015-07-01 02:14:07 +00003701
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003702retry_load:
3703 log_buf = malloc(log_buf_size);
Wang Nan55cffde2015-07-01 02:14:07 +00003704 if (!log_buf)
Kefeng Wangbe180102019-10-21 13:55:32 +08003705 pr_warn("Alloc log buffer for bpf loader error, continue without log\n");
Wang Nan55cffde2015-07-01 02:14:07 +00003706
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003707 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
Wang Nan55cffde2015-07-01 02:14:07 +00003708
3709 if (ret >= 0) {
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003710 if (load_attr.log_level)
3711 pr_debug("verifier log:\n%s", log_buf);
Wang Nan55cffde2015-07-01 02:14:07 +00003712 *pfd = ret;
3713 ret = 0;
3714 goto out;
3715 }
3716
Alexei Starovoitovda11b412019-04-01 21:27:47 -07003717 if (errno == ENOSPC) {
3718 log_buf_size <<= 1;
3719 free(log_buf);
3720 goto retry_load;
3721 }
Wang Nan6371ca3b2015-11-06 13:49:37 +00003722 ret = -LIBBPF_ERRNO__LOAD;
Andrey Ignatov24d6a802018-10-03 15:26:41 -07003723 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08003724 pr_warn("load bpf program failed: %s\n", cp);
Wang Nan55cffde2015-07-01 02:14:07 +00003725
Wang Nan6371ca3b2015-11-06 13:49:37 +00003726 if (log_buf && log_buf[0] != '\0') {
3727 ret = -LIBBPF_ERRNO__VERIFY;
Kefeng Wangbe180102019-10-21 13:55:32 +08003728 pr_warn("-- BEGIN DUMP LOG ---\n");
3729 pr_warn("\n%s\n", log_buf);
3730 pr_warn("-- END LOG --\n");
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003731 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003732 pr_warn("Program too large (%zu insns), at most %d insns\n",
3733 load_attr.insns_cnt, BPF_MAXINSNS);
Wang Nan705fa212016-07-13 10:44:02 +00003734 ret = -LIBBPF_ERRNO__PROG2BIG;
Wang Nan6371ca3b2015-11-06 13:49:37 +00003735 } else {
Wang Nan705fa212016-07-13 10:44:02 +00003736 /* Wrong program type? */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003737 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
Wang Nan705fa212016-07-13 10:44:02 +00003738 int fd;
3739
Andrey Ignatovd7be1432018-03-30 15:08:01 -07003740 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
3741 load_attr.expected_attach_type = 0;
3742 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
Wang Nan705fa212016-07-13 10:44:02 +00003743 if (fd >= 0) {
3744 close(fd);
3745 ret = -LIBBPF_ERRNO__PROGTYPE;
3746 goto out;
3747 }
Wang Nan6371ca3b2015-11-06 13:49:37 +00003748 }
Wang Nan705fa212016-07-13 10:44:02 +00003749
3750 if (log_buf)
3751 ret = -LIBBPF_ERRNO__KVER;
Wang Nan55cffde2015-07-01 02:14:07 +00003752 }
3753
3754out:
3755 free(log_buf);
3756 return ret;
3757}
3758
Joe Stringer29cd77f2018-10-02 13:35:39 -07003759int
Wang Nan55cffde2015-07-01 02:14:07 +00003760bpf_program__load(struct bpf_program *prog,
Andrey Ignatove5b08632018-10-03 15:26:43 -07003761 char *license, __u32 kern_version)
Wang Nan55cffde2015-07-01 02:14:07 +00003762{
Wang Nanb5805632015-11-16 12:10:09 +00003763 int err = 0, fd, i;
Wang Nan55cffde2015-07-01 02:14:07 +00003764
Wang Nanb5805632015-11-16 12:10:09 +00003765 if (prog->instances.nr < 0 || !prog->instances.fds) {
3766 if (prog->preprocessor) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003767 pr_warn("Internal error: can't load program '%s'\n",
3768 prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00003769 return -LIBBPF_ERRNO__INTERNAL;
3770 }
Wang Nan55cffde2015-07-01 02:14:07 +00003771
Wang Nanb5805632015-11-16 12:10:09 +00003772 prog->instances.fds = malloc(sizeof(int));
3773 if (!prog->instances.fds) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003774 pr_warn("Not enough memory for BPF fds\n");
Wang Nanb5805632015-11-16 12:10:09 +00003775 return -ENOMEM;
3776 }
3777 prog->instances.nr = 1;
3778 prog->instances.fds[0] = -1;
3779 }
3780
3781 if (!prog->preprocessor) {
3782 if (prog->instances.nr != 1) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003783 pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
3784 prog->section_name, prog->instances.nr);
Wang Nanb5805632015-11-16 12:10:09 +00003785 }
Yonghong Song2993e052018-11-19 15:29:16 -08003786 err = load_program(prog, prog->insns, prog->insns_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003787 license, kern_version, &fd);
Wang Nanb5805632015-11-16 12:10:09 +00003788 if (!err)
3789 prog->instances.fds[0] = fd;
3790 goto out;
3791 }
3792
3793 for (i = 0; i < prog->instances.nr; i++) {
3794 struct bpf_prog_prep_result result;
3795 bpf_program_prep_t preprocessor = prog->preprocessor;
3796
Andrii Nakryiko1ad9cbb2019-02-13 10:25:53 -08003797 memset(&result, 0, sizeof(result));
Wang Nanb5805632015-11-16 12:10:09 +00003798 err = preprocessor(prog, i, prog->insns,
3799 prog->insns_cnt, &result);
3800 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003801 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
3802 i, prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00003803 goto out;
3804 }
3805
3806 if (!result.new_insn_ptr || !result.new_insn_cnt) {
3807 pr_debug("Skip loading the %dth instance of program '%s'\n",
3808 i, prog->section_name);
3809 prog->instances.fds[i] = -1;
3810 if (result.pfd)
3811 *result.pfd = -1;
3812 continue;
3813 }
3814
Yonghong Song2993e052018-11-19 15:29:16 -08003815 err = load_program(prog, result.new_insn_ptr,
Wang Nanb5805632015-11-16 12:10:09 +00003816 result.new_insn_cnt,
Martin KaFai Lauf0187f02018-12-07 16:42:29 -08003817 license, kern_version, &fd);
Wang Nanb5805632015-11-16 12:10:09 +00003818
3819 if (err) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003820 pr_warn("Loading the %dth instance of program '%s' failed\n",
3821 i, prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00003822 goto out;
3823 }
3824
3825 if (result.pfd)
3826 *result.pfd = fd;
3827 prog->instances.fds[i] = fd;
3828 }
3829out:
Wang Nan55cffde2015-07-01 02:14:07 +00003830 if (err)
Kefeng Wangbe180102019-10-21 13:55:32 +08003831 pr_warn("failed to load program '%s'\n", prog->section_name);
Wang Nan55cffde2015-07-01 02:14:07 +00003832 zfree(&prog->insns);
3833 prog->insns_cnt = 0;
3834 return err;
3835}
3836
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07003837static bool bpf_program__is_function_storage(const struct bpf_program *prog,
3838 const struct bpf_object *obj)
Jakub Kicinski9a94f272018-06-28 14:41:38 -07003839{
3840 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
3841}
3842
Wang Nan55cffde2015-07-01 02:14:07 +00003843static int
Quentin Monnet60276f92019-05-24 11:36:47 +01003844bpf_object__load_progs(struct bpf_object *obj, int log_level)
Wang Nan55cffde2015-07-01 02:14:07 +00003845{
3846 size_t i;
3847 int err;
3848
3849 for (i = 0; i < obj->nr_programs; i++) {
Jakub Kicinski9a94f272018-06-28 14:41:38 -07003850 if (bpf_program__is_function_storage(&obj->programs[i], obj))
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08003851 continue;
Quentin Monnet501b1252019-05-29 15:26:41 +01003852 obj->programs[i].log_level |= log_level;
Wang Nan55cffde2015-07-01 02:14:07 +00003853 err = bpf_program__load(&obj->programs[i],
3854 obj->license,
3855 obj->kern_version);
3856 if (err)
3857 return err;
3858 }
3859 return 0;
3860}
3861
Alexei Starovoitov12a86542019-10-30 15:32:12 -07003862static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id);
3863
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003864static struct bpf_object *
Andrii Nakryiko5e61f272019-10-04 15:40:34 -07003865__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003866 struct bpf_object_open_opts *opts)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003867{
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01003868 const char *pin_root_path;
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07003869 struct bpf_program *prog;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003870 struct bpf_object *obj;
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003871 const char *obj_name;
3872 char tmp_name[64];
3873 bool relaxed_maps;
Wang Nan6371ca3b2015-11-06 13:49:37 +00003874 int err;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003875
3876 if (elf_version(EV_CURRENT) == EV_NONE) {
Kefeng Wangbe180102019-10-21 13:55:32 +08003877 pr_warn("failed to init libelf for %s\n",
3878 path ? : "(mem buf)");
Wang Nan6371ca3b2015-11-06 13:49:37 +00003879 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003880 }
3881
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003882 if (!OPTS_VALID(opts, bpf_object_open_opts))
3883 return ERR_PTR(-EINVAL);
3884
3885 obj_name = OPTS_GET(opts, object_name, path);
3886 if (obj_buf) {
3887 if (!obj_name) {
3888 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
3889 (unsigned long)obj_buf,
3890 (unsigned long)obj_buf_sz);
3891 obj_name = tmp_name;
3892 }
3893 path = obj_name;
3894 pr_debug("loading object '%s' from buffer\n", obj_name);
3895 }
3896
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003897 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003898 if (IS_ERR(obj))
3899 return obj;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003900
Andrii Nakryiko62561eb2019-10-15 11:28:47 -07003901 obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003902 relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01003903 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003904
Wang Nan6371ca3b2015-11-06 13:49:37 +00003905 CHECK_ERR(bpf_object__elf_init(obj), err, out);
3906 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
Daniel Borkmann8837fe52019-04-24 00:45:56 +02003907 CHECK_ERR(bpf_object__probe_caps(obj), err, out);
Toke Høiland-Jørgensen57a00f42019-11-02 12:09:41 +01003908 CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
3909 err, out);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003910 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003911 bpf_object__elf_finish(obj);
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07003912
3913 bpf_object__for_each_program(prog, obj) {
3914 enum bpf_prog_type prog_type;
3915 enum bpf_attach_type attach_type;
Alexei Starovoitov12a86542019-10-30 15:32:12 -07003916 __u32 btf_id;
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07003917
3918 err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
3919 &attach_type);
3920 if (err == -ESRCH)
3921 /* couldn't guess, but user might manually specify */
3922 continue;
3923 if (err)
3924 goto out;
3925
3926 bpf_program__set_type(prog, prog_type);
3927 bpf_program__set_expected_attach_type(prog, attach_type);
Alexei Starovoitov12a86542019-10-30 15:32:12 -07003928 if (prog_type == BPF_PROG_TYPE_TRACING) {
3929 err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id);
3930 if (err)
3931 goto out;
3932 prog->attach_btf_id = btf_id;
3933 }
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07003934 }
3935
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003936 return obj;
3937out:
3938 bpf_object__close(obj);
Wang Nan6371ca3b2015-11-06 13:49:37 +00003939 return ERR_PTR(err);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003940}
3941
Andrii Nakryiko5e61f272019-10-04 15:40:34 -07003942static struct bpf_object *
3943__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003944{
Andrii Nakryikoe00aca62019-10-22 10:21:00 -07003945 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003946 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
3947 );
3948
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003949 /* param validation */
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003950 if (!attr->file)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00003951 return NULL;
3952
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003953 pr_debug("loading %s\n", attr->file);
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003954 return __bpf_object__open(attr->file, NULL, 0, &opts);
John Fastabendc034a172018-10-15 11:19:55 -07003955}
3956
3957struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
3958{
3959 return __bpf_object__open_xattr(attr, 0);
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07003960}
3961
3962struct bpf_object *bpf_object__open(const char *path)
3963{
3964 struct bpf_object_open_attr attr = {
3965 .file = path,
3966 .prog_type = BPF_PROG_TYPE_UNSPEC,
3967 };
3968
3969 return bpf_object__open_xattr(&attr);
Wang Nan6c956392015-07-01 02:13:54 +00003970}
3971
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003972struct bpf_object *
3973bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
3974{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003975 if (!path)
3976 return ERR_PTR(-EINVAL);
3977
3978 pr_debug("loading %s\n", path);
3979
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003980 return __bpf_object__open(path, NULL, 0, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003981}
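
/* Editor's note -- an illustrative sketch, not part of libbpf: typical usage
 * of the opts-based open API (file name and option values are hypothetical):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_object",
 *		.relaxed_core_relocs = true,
 *	);
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	long err = libbpf_get_error(obj);
 *
 *	if (err)
 *		return err;
 */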
3982
3983struct bpf_object *
3984bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
3985 struct bpf_object_open_opts *opts)
Wang Nan6c956392015-07-01 02:13:54 +00003986{
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003987 if (!obj_buf || obj_buf_sz == 0)
3988 return ERR_PTR(-EINVAL);
Wang Nan6c956392015-07-01 02:13:54 +00003989
Andrii Nakryiko291ee022019-10-15 11:28:46 -07003990 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003991}
3992
3993struct bpf_object *
3994bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
3995 const char *name)
3996{
Andrii Nakryikoe00aca62019-10-22 10:21:00 -07003997 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
Andrii Nakryiko2ce84502019-10-04 15:40:35 -07003998 .object_name = name,
3999 /* wrong default, but backwards-compatible */
4000 .relaxed_maps = true,
4001 );
4002
4003 /* returning NULL is wrong, but backwards-compatible */
4004 if (!obj_buf || obj_buf_sz == 0)
4005 return NULL;
4006
4007 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004008}
4009
Wang Nan52d33522015-07-01 02:14:04 +00004010int bpf_object__unload(struct bpf_object *obj)
4011{
4012 size_t i;
4013
4014 if (!obj)
4015 return -EINVAL;
4016
Wang Nan9d759a92015-11-27 08:47:35 +00004017 for (i = 0; i < obj->nr_maps; i++)
4018 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00004019
Wang Nan55cffde2015-07-01 02:14:07 +00004020 for (i = 0; i < obj->nr_programs; i++)
4021 bpf_program__unload(&obj->programs[i]);
4022
Wang Nan52d33522015-07-01 02:14:04 +00004023 return 0;
4024}
4025
Quentin Monnet60276f92019-05-24 11:36:47 +01004026int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
Wang Nan52d33522015-07-01 02:14:04 +00004027{
Quentin Monnet60276f92019-05-24 11:36:47 +01004028 struct bpf_object *obj;
Wang Nan6371ca3b2015-11-06 13:49:37 +00004029 int err;
4030
Quentin Monnet60276f92019-05-24 11:36:47 +01004031 if (!attr)
4032 return -EINVAL;
4033 obj = attr->obj;
Wang Nan52d33522015-07-01 02:14:04 +00004034 if (!obj)
4035 return -EINVAL;
4036
4037 if (obj->loaded) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004038 pr_warn("object should not be loaded twice\n");
Wang Nan52d33522015-07-01 02:14:04 +00004039 return -EINVAL;
4040 }
4041
4042 obj->loaded = true;
Wang Nan6371ca3b2015-11-06 13:49:37 +00004043
4044 CHECK_ERR(bpf_object__create_maps(obj), err, out);
Andrii Nakryikoddc7c302019-08-07 14:39:51 -07004045 CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
Quentin Monnet60276f92019-05-24 11:36:47 +01004046 CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
Wang Nan52d33522015-07-01 02:14:04 +00004047
4048 return 0;
4049out:
4050 bpf_object__unload(obj);
Kefeng Wangbe180102019-10-21 13:55:32 +08004051 pr_warn("failed to load object '%s'\n", obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00004052 return err;
Wang Nan52d33522015-07-01 02:14:04 +00004053}
4054
Quentin Monnet60276f92019-05-24 11:36:47 +01004055int bpf_object__load(struct bpf_object *obj)
4056{
4057 struct bpf_object_load_attr attr = {
4058 .obj = obj,
4059 };
4060
4061 return bpf_object__load_xattr(&attr);
4062}
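/*
 * Usage sketch: load all maps and programs of an already opened object. The
 * log_level shown is an arbitrary example; target_btf_path is left unset here.
 *
 *	struct bpf_object_load_attr load_attr = {
 *		.obj = obj,
 *		.log_level = 1,
 *	};
 *	int err;
 *
 *	err = bpf_object__load_xattr(&load_attr);
 *	// bpf_object__load(obj) is the same thing with log_level == 0
 */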
4063
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01004064static int make_parent_dir(const char *path)
4065{
4066 char *cp, errmsg[STRERR_BUFSIZE];
4067 char *dname, *dir;
4068 int err = 0;
4069
4070 dname = strdup(path);
4071 if (dname == NULL)
4072 return -ENOMEM;
4073
4074 dir = dirname(dname);
4075 if (mkdir(dir, 0700) && errno != EEXIST)
4076 err = -errno;
4077
4078 free(dname);
4079 if (err) {
4080 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4081 pr_warn("failed to mkdir %s: %s\n", path, cp);
4082 }
4083 return err;
4084}
4085
Joe Stringerf3675402017-01-26 13:19:56 -08004086static int check_path(const char *path)
4087{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02004088 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08004089 struct statfs st_fs;
4090 char *dname, *dir;
4091 int err = 0;
4092
4093 if (path == NULL)
4094 return -EINVAL;
4095
4096 dname = strdup(path);
4097 if (dname == NULL)
4098 return -ENOMEM;
4099
4100 dir = dirname(dname);
4101 if (statfs(dir, &st_fs)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07004102 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08004103 pr_warn("failed to statfs %s: %s\n", dir, cp);
Joe Stringerf3675402017-01-26 13:19:56 -08004104 err = -errno;
4105 }
4106 free(dname);
4107
4108 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004109 pr_warn("specified path %s is not on BPF FS\n", path);
Joe Stringerf3675402017-01-26 13:19:56 -08004110 err = -EINVAL;
4111 }
4112
4113 return err;
4114}
4115
4116int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
4117 int instance)
4118{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02004119 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerf3675402017-01-26 13:19:56 -08004120 int err;
4121
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01004122 err = make_parent_dir(path);
4123 if (err)
4124 return err;
4125
Joe Stringerf3675402017-01-26 13:19:56 -08004126 err = check_path(path);
4127 if (err)
4128 return err;
4129
4130 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004131 pr_warn("invalid program pointer\n");
Joe Stringerf3675402017-01-26 13:19:56 -08004132 return -EINVAL;
4133 }
4134
4135 if (instance < 0 || instance >= prog->instances.nr) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004136 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
4137 instance, prog->section_name, prog->instances.nr);
Joe Stringerf3675402017-01-26 13:19:56 -08004138 return -EINVAL;
4139 }
4140
4141 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
Andrey Ignatov24d6a802018-10-03 15:26:41 -07004142 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
Kefeng Wangbe180102019-10-21 13:55:32 +08004143 pr_warn("failed to pin program: %s\n", cp);
Joe Stringerf3675402017-01-26 13:19:56 -08004144 return -errno;
4145 }
4146 pr_debug("pinned program '%s'\n", path);
4147
4148 return 0;
4149}
4150
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004151int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
4152 int instance)
4153{
4154 int err;
4155
4156 err = check_path(path);
4157 if (err)
4158 return err;
4159
4160 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004161 pr_warn("invalid program pointer\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004162 return -EINVAL;
4163 }
4164
4165 if (instance < 0 || instance >= prog->instances.nr) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004166 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
4167 instance, prog->section_name, prog->instances.nr);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004168 return -EINVAL;
4169 }
4170
4171 err = unlink(path);
4172 if (err != 0)
4173 return -errno;
4174 pr_debug("unpinned program '%s'\n", path);
4175
4176 return 0;
4177}
4178
Joe Stringerf3675402017-01-26 13:19:56 -08004179int bpf_program__pin(struct bpf_program *prog, const char *path)
4180{
4181 int i, err;
4182
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01004183 err = make_parent_dir(path);
4184 if (err)
4185 return err;
4186
Joe Stringerf3675402017-01-26 13:19:56 -08004187 err = check_path(path);
4188 if (err)
4189 return err;
4190
4191 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004192 pr_warn("invalid program pointer\n");
Joe Stringerf3675402017-01-26 13:19:56 -08004193 return -EINVAL;
4194 }
4195
4196 if (prog->instances.nr <= 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004197 pr_warn("no instances of prog %s to pin\n",
Joe Stringerf3675402017-01-26 13:19:56 -08004198 prog->section_name);
4199 return -EINVAL;
4200 }
4201
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08004202 if (prog->instances.nr == 1) {
4203 /* don't create subdirs when pinning single instance */
4204 return bpf_program__pin_instance(prog, path, 0);
4205 }
4206
Joe Stringerf3675402017-01-26 13:19:56 -08004207 for (i = 0; i < prog->instances.nr; i++) {
4208 char buf[PATH_MAX];
4209 int len;
4210
4211 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004212 if (len < 0) {
4213 err = -EINVAL;
4214 goto err_unpin;
4215 } else if (len >= PATH_MAX) {
4216 err = -ENAMETOOLONG;
4217 goto err_unpin;
4218 }
4219
4220 err = bpf_program__pin_instance(prog, buf, i);
4221 if (err)
4222 goto err_unpin;
4223 }
4224
4225 return 0;
4226
4227err_unpin:
4228 for (i = i - 1; i >= 0; i--) {
4229 char buf[PATH_MAX];
4230 int len;
4231
4232 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
4233 if (len < 0)
4234 continue;
4235 else if (len >= PATH_MAX)
4236 continue;
4237
4238 bpf_program__unpin_instance(prog, buf, i);
4239 }
4240
4241 rmdir(path);
4242
4243 return err;
4244}
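/*
 * Usage sketch: pin a loaded program to the BPF filesystem. The path below
 * is hypothetical; it must live on a mounted bpffs (typically /sys/fs/bpf),
 * otherwise check_path() rejects it.
 *
 *	int err;
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	// ... later:
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */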
4245
4246int bpf_program__unpin(struct bpf_program *prog, const char *path)
4247{
4248 int i, err;
4249
4250 err = check_path(path);
4251 if (err)
4252 return err;
4253
4254 if (prog == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004255 pr_warn("invalid program pointer\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004256 return -EINVAL;
4257 }
4258
4259 if (prog->instances.nr <= 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004260		pr_warn("no instances of prog %s to unpin\n",
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004261 prog->section_name);
4262 return -EINVAL;
4263 }
4264
Stanislav Fomichevfd734c52018-11-09 08:21:42 -08004265 if (prog->instances.nr == 1) {
4266 /* don't create subdirs when pinning single instance */
4267 return bpf_program__unpin_instance(prog, path, 0);
4268 }
4269
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004270 for (i = 0; i < prog->instances.nr; i++) {
4271 char buf[PATH_MAX];
4272 int len;
4273
4274 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
Joe Stringerf3675402017-01-26 13:19:56 -08004275 if (len < 0)
4276 return -EINVAL;
4277 else if (len >= PATH_MAX)
4278 return -ENAMETOOLONG;
4279
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004280 err = bpf_program__unpin_instance(prog, buf, i);
Joe Stringerf3675402017-01-26 13:19:56 -08004281 if (err)
4282 return err;
4283 }
4284
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004285 err = rmdir(path);
4286 if (err)
4287 return -errno;
4288
Joe Stringerf3675402017-01-26 13:19:56 -08004289 return 0;
4290}
4291
Joe Stringerb6989f32017-01-26 13:19:57 -08004292int bpf_map__pin(struct bpf_map *map, const char *path)
4293{
Thomas Richter1ce6a9f2018-07-30 10:53:23 +02004294 char *cp, errmsg[STRERR_BUFSIZE];
Joe Stringerb6989f32017-01-26 13:19:57 -08004295 int err;
4296
Joe Stringerb6989f32017-01-26 13:19:57 -08004297 if (map == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004298 pr_warn("invalid map pointer\n");
Joe Stringerb6989f32017-01-26 13:19:57 -08004299 return -EINVAL;
4300 }
4301
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004302 if (map->pin_path) {
4303 if (path && strcmp(path, map->pin_path)) {
4304 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
4305 bpf_map__name(map), map->pin_path, path);
4306 return -EINVAL;
4307 } else if (map->pinned) {
4308 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
4309 bpf_map__name(map), map->pin_path);
4310 return 0;
4311 }
4312 } else {
4313 if (!path) {
4314 pr_warn("missing a path to pin map '%s' at\n",
4315 bpf_map__name(map));
4316 return -EINVAL;
4317 } else if (map->pinned) {
4318 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
4319 return -EEXIST;
4320 }
4321
4322 map->pin_path = strdup(path);
4323 if (!map->pin_path) {
4324 err = -errno;
4325 goto out_err;
4326 }
Joe Stringerb6989f32017-01-26 13:19:57 -08004327 }
4328
Toke Høiland-Jørgensen196f8482019-11-02 12:09:39 +01004329 err = make_parent_dir(map->pin_path);
4330 if (err)
4331 return err;
4332
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004333 err = check_path(map->pin_path);
4334 if (err)
4335 return err;
4336
4337 if (bpf_obj_pin(map->fd, map->pin_path)) {
4338 err = -errno;
4339 goto out_err;
4340 }
4341
4342 map->pinned = true;
4343 pr_debug("pinned map '%s'\n", map->pin_path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004344
Joe Stringerb6989f32017-01-26 13:19:57 -08004345 return 0;
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004346
4347out_err:
4348 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4349 pr_warn("failed to pin map: %s\n", cp);
4350 return err;
Joe Stringerb6989f32017-01-26 13:19:57 -08004351}
4352
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004353int bpf_map__unpin(struct bpf_map *map, const char *path)
Joe Stringerd5148d82017-01-26 13:19:58 -08004354{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004355 int err;
4356
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004357 if (map == NULL) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004358 pr_warn("invalid map pointer\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004359 return -EINVAL;
4360 }
4361
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004362 if (map->pin_path) {
4363 if (path && strcmp(path, map->pin_path)) {
4364 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
4365 bpf_map__name(map), map->pin_path, path);
4366 return -EINVAL;
4367 }
4368 path = map->pin_path;
4369 } else if (!path) {
4370 pr_warn("no path to unpin map '%s' from\n",
4371 bpf_map__name(map));
4372 return -EINVAL;
4373 }
4374
4375 err = check_path(path);
4376 if (err)
4377 return err;
4378
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004379 err = unlink(path);
4380 if (err != 0)
4381 return -errno;
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004382
4383 map->pinned = false;
4384 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004385
4386 return 0;
4387}
4388
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004389int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
4390{
4391 char *new = NULL;
4392
4393 if (path) {
4394 new = strdup(path);
4395 if (!new)
4396 return -errno;
4397 }
4398
4399 free(map->pin_path);
4400 map->pin_path = new;
4401 return 0;
4402}
4403
4404const char *bpf_map__get_pin_path(const struct bpf_map *map)
4405{
4406 return map->pin_path;
4407}
4408
4409bool bpf_map__is_pinned(const struct bpf_map *map)
4410{
4411 return map->pinned;
4412}
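/*
 * Usage sketch: record a pin path on a map and (un)pin it through that
 * stored path. The path is hypothetical but must be on bpffs.
 *
 *	int err;
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	...
 *	err = bpf_map__pin(map, NULL);    // NULL means "use map->pin_path"
 *	// bpf_map__is_pinned(map) is now true on success
 *	err = bpf_map__unpin(map, NULL);
 */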
4413
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004414int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
4415{
Joe Stringerd5148d82017-01-26 13:19:58 -08004416 struct bpf_map *map;
4417 int err;
4418
4419 if (!obj)
4420 return -ENOENT;
4421
4422 if (!obj->loaded) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004423 pr_warn("object not yet loaded; load it first\n");
Joe Stringerd5148d82017-01-26 13:19:58 -08004424 return -ENOENT;
4425 }
4426
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08004427 bpf_object__for_each_map(map, obj) {
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004428 char *pin_path = NULL;
Joe Stringerd5148d82017-01-26 13:19:58 -08004429 char buf[PATH_MAX];
Joe Stringerd5148d82017-01-26 13:19:58 -08004430
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004431 if (path) {
4432 int len;
4433
4434 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4435 bpf_map__name(map));
4436 if (len < 0) {
4437 err = -EINVAL;
4438 goto err_unpin_maps;
4439 } else if (len >= PATH_MAX) {
4440 err = -ENAMETOOLONG;
4441 goto err_unpin_maps;
4442 }
4443 pin_path = buf;
4444 } else if (!map->pin_path) {
4445 continue;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004446 }
4447
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004448 err = bpf_map__pin(map, pin_path);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004449 if (err)
4450 goto err_unpin_maps;
4451 }
4452
4453 return 0;
4454
4455err_unpin_maps:
4456 while ((map = bpf_map__prev(map, obj))) {
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004457 if (!map->pin_path)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004458 continue;
4459
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004460 bpf_map__unpin(map, NULL);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004461 }
4462
4463 return err;
4464}
4465
4466int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
4467{
4468 struct bpf_map *map;
4469 int err;
4470
4471 if (!obj)
4472 return -ENOENT;
4473
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08004474 bpf_object__for_each_map(map, obj) {
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004475 char *pin_path = NULL;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004476 char buf[PATH_MAX];
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004477
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004478 if (path) {
4479 int len;
Joe Stringerd5148d82017-01-26 13:19:58 -08004480
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004481 len = snprintf(buf, PATH_MAX, "%s/%s", path,
4482 bpf_map__name(map));
4483 if (len < 0)
4484 return -EINVAL;
4485 else if (len >= PATH_MAX)
4486 return -ENAMETOOLONG;
4487 pin_path = buf;
4488 } else if (!map->pin_path) {
4489 continue;
4490 }
4491
4492 err = bpf_map__unpin(map, pin_path);
Joe Stringerd5148d82017-01-26 13:19:58 -08004493 if (err)
4494 return err;
4495 }
4496
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004497 return 0;
4498}
4499
4500int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
4501{
4502 struct bpf_program *prog;
4503 int err;
4504
4505 if (!obj)
4506 return -ENOENT;
4507
4508 if (!obj->loaded) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004509 pr_warn("object not yet loaded; load it first\n");
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004510 return -ENOENT;
4511 }
4512
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004513 bpf_object__for_each_program(prog, obj) {
4514 char buf[PATH_MAX];
4515 int len;
4516
4517 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08004518 prog->pin_name);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004519 if (len < 0) {
4520 err = -EINVAL;
4521 goto err_unpin_programs;
4522 } else if (len >= PATH_MAX) {
4523 err = -ENAMETOOLONG;
4524 goto err_unpin_programs;
4525 }
4526
4527 err = bpf_program__pin(prog, buf);
4528 if (err)
4529 goto err_unpin_programs;
4530 }
4531
4532 return 0;
4533
4534err_unpin_programs:
4535 while ((prog = bpf_program__prev(prog, obj))) {
4536 char buf[PATH_MAX];
4537 int len;
4538
4539 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08004540 prog->pin_name);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004541 if (len < 0)
4542 continue;
4543 else if (len >= PATH_MAX)
4544 continue;
4545
4546 bpf_program__unpin(prog, buf);
4547 }
4548
4549 return err;
4550}
4551
4552int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
4553{
4554 struct bpf_program *prog;
4555 int err;
4556
4557 if (!obj)
4558 return -ENOENT;
4559
Joe Stringerd5148d82017-01-26 13:19:58 -08004560 bpf_object__for_each_program(prog, obj) {
4561 char buf[PATH_MAX];
4562 int len;
4563
4564 len = snprintf(buf, PATH_MAX, "%s/%s", path,
Stanislav Fomichev33a2c752018-11-09 08:21:43 -08004565 prog->pin_name);
Joe Stringerd5148d82017-01-26 13:19:58 -08004566 if (len < 0)
4567 return -EINVAL;
4568 else if (len >= PATH_MAX)
4569 return -ENAMETOOLONG;
4570
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004571 err = bpf_program__unpin(prog, buf);
Joe Stringerd5148d82017-01-26 13:19:58 -08004572 if (err)
4573 return err;
4574 }
4575
4576 return 0;
4577}
4578
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004579int bpf_object__pin(struct bpf_object *obj, const char *path)
4580{
4581 int err;
4582
4583 err = bpf_object__pin_maps(obj, path);
4584 if (err)
4585 return err;
4586
4587 err = bpf_object__pin_programs(obj, path);
4588 if (err) {
4589 bpf_object__unpin_maps(obj, path);
4590 return err;
4591 }
4592
4593 return 0;
4594}
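/*
 * Usage sketch: pin a whole object under one bpffs directory. Maps end up
 * at <path>/<map name> and programs at <path>/<prog pin_name>. The
 * directory below is hypothetical.
 *
 *	int err;
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/myapp");
 *	// or separately: bpf_object__pin_maps() / bpf_object__pin_programs()
 */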
4595
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004596void bpf_object__close(struct bpf_object *obj)
4597{
Wang Nana5b8bd42015-07-01 02:14:00 +00004598 size_t i;
4599
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004600 if (!obj)
4601 return;
4602
Wang Nan10931d22016-11-26 07:03:26 +00004603 if (obj->clear_priv)
4604 obj->clear_priv(obj, obj->priv);
4605
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004606 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00004607 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004608 btf__free(obj->btf);
Yonghong Song2993e052018-11-19 15:29:16 -08004609 btf_ext__free(obj->btf_ext);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004610
Wang Nan9d759a92015-11-27 08:47:35 +00004611 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00004612 zfree(&obj->maps[i].name);
Toke Høiland-Jørgensen4580b252019-11-02 12:09:38 +01004613 zfree(&obj->maps[i].pin_path);
Wang Nan9d759a92015-11-27 08:47:35 +00004614 if (obj->maps[i].clear_priv)
4615 obj->maps[i].clear_priv(&obj->maps[i],
4616 obj->maps[i].priv);
4617 obj->maps[i].priv = NULL;
4618 obj->maps[i].clear_priv = NULL;
4619 }
Daniel Borkmannd8599002019-04-09 23:20:13 +02004620
4621 zfree(&obj->sections.rodata);
4622 zfree(&obj->sections.data);
Wang Nan9d759a92015-11-27 08:47:35 +00004623 zfree(&obj->maps);
4624 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00004625
4626 if (obj->programs && obj->nr_programs) {
4627 for (i = 0; i < obj->nr_programs; i++)
4628 bpf_program__exit(&obj->programs[i]);
4629 }
4630 zfree(&obj->programs);
4631
Wang Nan9a208ef2015-07-01 02:14:10 +00004632 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00004633 free(obj);
4634}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004635
Wang Nan9a208ef2015-07-01 02:14:10 +00004636struct bpf_object *
4637bpf_object__next(struct bpf_object *prev)
4638{
4639 struct bpf_object *next;
4640
4641 if (!prev)
4642 next = list_first_entry(&bpf_objects_list,
4643 struct bpf_object,
4644 list);
4645 else
4646 next = list_next_entry(prev, list);
4647
 4648	/* An empty list is detected here, so there's no need to check on entry. */
4649 if (&next->list == &bpf_objects_list)
4650 return NULL;
4651
4652 return next;
4653}
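/*
 * Usage sketch: walk all bpf_object instances currently tracked on
 * bpf_objects_list.
 *
 *	struct bpf_object *o = NULL;
 *
 *	while ((o = bpf_object__next(o)) != NULL)
 *		printf("object: %s\n", bpf_object__name(o));
 */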
4654
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004655const char *bpf_object__name(const struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00004656{
Andrii Nakryikoc9e4c302019-10-04 15:40:36 -07004657 return obj ? obj->name : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00004658}
4659
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004660unsigned int bpf_object__kversion(const struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00004661{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03004662 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00004663}
4664
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004665struct btf *bpf_object__btf(const struct bpf_object *obj)
Andrey Ignatov789f6ba2019-02-14 15:01:43 -08004666{
4667 return obj ? obj->btf : NULL;
4668}
4669
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07004670int bpf_object__btf_fd(const struct bpf_object *obj)
4671{
4672 return obj->btf ? btf__fd(obj->btf) : -1;
4673}
4674
Wang Nan10931d22016-11-26 07:03:26 +00004675int bpf_object__set_priv(struct bpf_object *obj, void *priv,
4676 bpf_object_clear_priv_t clear_priv)
4677{
4678 if (obj->priv && obj->clear_priv)
4679 obj->clear_priv(obj, obj->priv);
4680
4681 obj->priv = priv;
4682 obj->clear_priv = clear_priv;
4683 return 0;
4684}
4685
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004686void *bpf_object__priv(const struct bpf_object *obj)
Wang Nan10931d22016-11-26 07:03:26 +00004687{
4688 return obj ? obj->priv : ERR_PTR(-EINVAL);
4689}
4690
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004691static struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004692__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
4693 bool forward)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004694{
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004695 size_t nr_programs = obj->nr_programs;
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004696 ssize_t idx;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004697
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004698 if (!nr_programs)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004699 return NULL;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004700
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004701 if (!p)
 4702		/* Iterate from the beginning */
4703 return forward ? &obj->programs[0] :
4704 &obj->programs[nr_programs - 1];
4705
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004706 if (p->obj != obj) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004707		pr_warn("error: program handle doesn't match object\n");
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004708 return NULL;
4709 }
4710
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004711 idx = (p - obj->programs) + (forward ? 1 : -1);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004712 if (idx >= obj->nr_programs || idx < 0)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004713 return NULL;
4714 return &obj->programs[idx];
4715}
4716
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004717struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004718bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004719{
4720 struct bpf_program *prog = prev;
4721
4722 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004723 prog = __bpf_program__iter(prog, obj, true);
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004724 } while (prog && bpf_program__is_function_storage(prog, obj));
4725
4726 return prog;
4727}
4728
4729struct bpf_program *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004730bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004731{
4732 struct bpf_program *prog = next;
4733
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08004734 do {
Martin KaFai Laua83d6e72018-11-12 15:44:53 -08004735 prog = __bpf_program__iter(prog, obj, false);
Jakub Kicinskieac7d842018-06-28 14:41:39 -07004736 } while (prog && bpf_program__is_function_storage(prog, obj));
4737
4738 return prog;
4739}
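/*
 * Usage sketch: iterate over all programs of an object; the
 * bpf_object__for_each_program() macro from libbpf.h wraps this iterator.
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_program__next(prog, obj)) != NULL)
 *		printf("section: %s\n", bpf_program__title(prog, false));
 */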
4740
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03004741int bpf_program__set_priv(struct bpf_program *prog, void *priv,
4742 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004743{
4744 if (prog->priv && prog->clear_priv)
4745 prog->clear_priv(prog, prog->priv);
4746
4747 prog->priv = priv;
4748 prog->clear_priv = clear_priv;
4749 return 0;
4750}
4751
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004752void *bpf_program__priv(const struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004753{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03004754 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004755}
4756
Jakub Kicinski9aba3612018-06-28 14:41:37 -07004757void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
4758{
4759 prog->prog_ifindex = ifindex;
4760}
4761
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004762const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004763{
4764 const char *title;
4765
4766 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09004767 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004768 title = strdup(title);
4769 if (!title) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004770 pr_warn("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00004771 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004772 }
4773 }
4774
4775 return title;
4776}
4777
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004778int bpf_program__fd(const struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004779{
Wang Nanb5805632015-11-16 12:10:09 +00004780 return bpf_program__nth_fd(prog, 0);
4781}
4782
4783int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
4784 bpf_program_prep_t prep)
4785{
4786 int *instances_fds;
4787
4788 if (nr_instances <= 0 || !prep)
4789 return -EINVAL;
4790
4791 if (prog->instances.nr > 0 || prog->instances.fds) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004792 pr_warn("Can't set pre-processor after loading\n");
Wang Nanb5805632015-11-16 12:10:09 +00004793 return -EINVAL;
4794 }
4795
4796 instances_fds = malloc(sizeof(int) * nr_instances);
4797 if (!instances_fds) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004798		pr_warn("failed to allocate memory for instance fds\n");
Wang Nanb5805632015-11-16 12:10:09 +00004799 return -ENOMEM;
4800 }
4801
 4802	/* fill all fds with -1 */
4803 memset(instances_fds, -1, sizeof(int) * nr_instances);
4804
4805 prog->instances.nr = nr_instances;
4806 prog->instances.fds = instances_fds;
4807 prog->preprocessor = prep;
4808 return 0;
4809}
4810
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004811int bpf_program__nth_fd(const struct bpf_program *prog, int n)
Wang Nanb5805632015-11-16 12:10:09 +00004812{
4813 int fd;
4814
Jakub Kicinski1e960042018-07-26 14:32:18 -07004815 if (!prog)
4816 return -EINVAL;
4817
Wang Nanb5805632015-11-16 12:10:09 +00004818 if (n >= prog->instances.nr || n < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004819 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
4820 n, prog->section_name, prog->instances.nr);
Wang Nanb5805632015-11-16 12:10:09 +00004821 return -EINVAL;
4822 }
4823
4824 fd = prog->instances.fds[n];
4825 if (fd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08004826 pr_warn("%dth instance of program '%s' is invalid\n",
4827 n, prog->section_name);
Wang Nanb5805632015-11-16 12:10:09 +00004828 return -ENOENT;
4829 }
4830
4831 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00004832}
Wang Nan9d759a92015-11-27 08:47:35 +00004833
Andrii Nakryikof1eead92019-10-20 20:38:57 -07004834enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
4835{
4836 return prog->type;
4837}
4838
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07004839void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00004840{
4841 prog->type = type;
4842}
4843
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004844static bool bpf_program__is_type(const struct bpf_program *prog,
Wang Nan5f44e4c82016-07-13 10:44:01 +00004845 enum bpf_prog_type type)
4846{
4847 return prog ? (prog->type == type) : false;
4848}
4849
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07004850#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
4851int bpf_program__set_##NAME(struct bpf_program *prog) \
4852{ \
4853 if (!prog) \
4854 return -EINVAL; \
4855 bpf_program__set_type(prog, TYPE); \
4856 return 0; \
4857} \
4858 \
4859bool bpf_program__is_##NAME(const struct bpf_program *prog) \
4860{ \
4861 return bpf_program__is_type(prog, TYPE); \
4862} \
Wang Nan5f44e4c82016-07-13 10:44:01 +00004863
Joe Stringer7803ba72017-01-22 17:11:24 -08004864BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
Joe Stringered794072017-01-22 17:11:23 -08004865BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
Joe Stringer7803ba72017-01-22 17:11:24 -08004866BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
4867BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
Joe Stringered794072017-01-22 17:11:23 -08004868BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
Andrey Ignatove14c93fd2018-04-17 10:28:46 -07004869BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
Joe Stringer7803ba72017-01-22 17:11:24 -08004870BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
4871BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Alexei Starovoitov12a86542019-10-30 15:32:12 -07004872BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
Wang Nan5f44e4c82016-07-13 10:44:01 +00004873
Andrii Nakryikof1eead92019-10-20 20:38:57 -07004874enum bpf_attach_type
4875bpf_program__get_expected_attach_type(struct bpf_program *prog)
4876{
4877 return prog->expected_attach_type;
4878}
4879
John Fastabend16962b22018-04-23 14:30:38 -07004880void bpf_program__set_expected_attach_type(struct bpf_program *prog,
4881 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004882{
4883 prog->expected_attach_type = type;
4884}
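/*
 * Usage sketch: override program type and expected attach type before
 * bpf_object__load(), e.g. when the section name alone doesn't identify
 * them. The values below are arbitrary examples.
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
 *	bpf_program__set_expected_attach_type(prog, BPF_CGROUP_INET4_CONNECT);
 */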
4885
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004886#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
4887 { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004888
Andrey Ignatov956b6202018-09-26 15:24:53 -07004889/* Programs that can NOT be attached. */
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004890#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004891
Andrey Ignatov956b6202018-09-26 15:24:53 -07004892/* Programs that can be attached. */
4893#define BPF_APROG_SEC(string, ptype, atype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004894 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
Andrey Ignatov81efee72018-04-17 10:28:45 -07004895
Andrey Ignatov956b6202018-09-26 15:24:53 -07004896/* Programs that must specify expected attach type at load time. */
4897#define BPF_EAPROG_SEC(string, ptype, eatype) \
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004898 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
4899
4900/* Programs that use BTF to identify attach point */
Alexei Starovoitov12a86542019-10-30 15:32:12 -07004901#define BPF_PROG_BTF(string, ptype, eatype) \
4902 BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
Andrey Ignatov956b6202018-09-26 15:24:53 -07004903
4904/* Programs that can be attached but attach type can't be identified by section
4905 * name. Kept for backward compatibility.
4906 */
4907#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
Andrey Ignatove50b0a62018-03-30 15:08:03 -07004908
Roman Gushchin583c9002017-12-13 15:18:51 +00004909static const struct {
4910 const char *sec;
4911 size_t len;
4912 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004913 enum bpf_attach_type expected_attach_type;
Alexei Starovoitovf75a6972019-10-15 20:24:59 -07004914 bool is_attachable;
4915 bool is_attach_btf;
Andrey Ignatov956b6202018-09-26 15:24:53 -07004916 enum bpf_attach_type attach_type;
Roman Gushchin583c9002017-12-13 15:18:51 +00004917} section_names[] = {
Andrey Ignatov956b6202018-09-26 15:24:53 -07004918 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
4919 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
Andrii Nakryiko32dff6d2019-10-20 20:38:58 -07004920 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004921 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
Andrii Nakryiko32dff6d2019-10-20 20:38:58 -07004922 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004923 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
4924 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
4925 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
Andrii Nakryiko32dff6d2019-10-20 20:38:58 -07004926 BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004927 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
Andrii Nakryiko32dff6d2019-10-20 20:38:58 -07004928 BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
Alexei Starovoitov12a86542019-10-30 15:32:12 -07004929 BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
4930 BPF_TRACE_RAW_TP),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004931 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
4932 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
4933 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
4934 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
4935 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
4936 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
Andrey Ignatovbafa7af2018-09-26 15:24:54 -07004937 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
4938 BPF_CGROUP_INET_INGRESS),
4939 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
4940 BPF_CGROUP_INET_EGRESS),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004941 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
4942 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
4943 BPF_CGROUP_INET_SOCK_CREATE),
4944 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
4945 BPF_CGROUP_INET4_POST_BIND),
4946 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
4947 BPF_CGROUP_INET6_POST_BIND),
4948 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
4949 BPF_CGROUP_DEVICE),
4950 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
4951 BPF_CGROUP_SOCK_OPS),
Andrey Ignatovc6f68512018-09-26 15:24:55 -07004952 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
4953 BPF_SK_SKB_STREAM_PARSER),
4954 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
4955 BPF_SK_SKB_STREAM_VERDICT),
Andrey Ignatov956b6202018-09-26 15:24:53 -07004956 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
4957 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
4958 BPF_SK_MSG_VERDICT),
4959 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
4960 BPF_LIRC_MODE2),
4961 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
4962 BPF_FLOW_DISSECTOR),
4963 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4964 BPF_CGROUP_INET4_BIND),
4965 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4966 BPF_CGROUP_INET6_BIND),
4967 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4968 BPF_CGROUP_INET4_CONNECT),
4969 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4970 BPF_CGROUP_INET6_CONNECT),
4971 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4972 BPF_CGROUP_UDP4_SENDMSG),
4973 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4974 BPF_CGROUP_UDP6_SENDMSG),
Daniel Borkmann9bb59ac2019-06-07 01:48:59 +02004975 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4976 BPF_CGROUP_UDP4_RECVMSG),
4977 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4978 BPF_CGROUP_UDP6_RECVMSG),
Andrey Ignatov063cc9f2019-03-08 09:15:26 -08004979 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
4980 BPF_CGROUP_SYSCTL),
Stanislav Fomichev4cdbfb52019-06-27 13:38:49 -07004981 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
4982 BPF_CGROUP_GETSOCKOPT),
4983 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
4984 BPF_CGROUP_SETSOCKOPT),
Roman Gushchin583c9002017-12-13 15:18:51 +00004985};
Roman Gushchin583c9002017-12-13 15:18:51 +00004986
Andrey Ignatov956b6202018-09-26 15:24:53 -07004987#undef BPF_PROG_SEC_IMPL
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004988#undef BPF_PROG_SEC
Andrey Ignatov956b6202018-09-26 15:24:53 -07004989#undef BPF_APROG_SEC
4990#undef BPF_EAPROG_SEC
4991#undef BPF_APROG_COMPAT
Andrey Ignatovd7be1432018-03-30 15:08:01 -07004992
Taeung Songc76e4c22019-01-21 22:06:38 +09004993#define MAX_TYPE_NAME_SIZE 32
4994
4995static char *libbpf_get_type_names(bool attach_type)
4996{
4997 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
4998 char *buf;
4999
5000 buf = malloc(len);
5001 if (!buf)
5002 return NULL;
5003
5004 buf[0] = '\0';
 5005	/* Fill buf with all available section names */
5006 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
5007 if (attach_type && !section_names[i].is_attachable)
5008 continue;
5009
5010 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
5011 free(buf);
5012 return NULL;
5013 }
5014 strcat(buf, " ");
5015 strcat(buf, section_names[i].sec);
5016 }
5017
5018 return buf;
5019}
5020
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07005021int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
5022 enum bpf_attach_type *expected_attach_type)
Roman Gushchin583c9002017-12-13 15:18:51 +00005023{
Taeung Songc76e4c22019-01-21 22:06:38 +09005024 char *type_names;
Roman Gushchin583c9002017-12-13 15:18:51 +00005025 int i;
5026
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07005027 if (!name)
5028 return -EINVAL;
Roman Gushchin583c9002017-12-13 15:18:51 +00005029
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07005030 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
5031 if (strncmp(name, section_names[i].sec, section_names[i].len))
5032 continue;
5033 *prog_type = section_names[i].prog_type;
5034 *expected_attach_type = section_names[i].expected_attach_type;
5035 return 0;
5036 }
Kefeng Wangbe180102019-10-21 13:55:32 +08005037 pr_warn("failed to guess program type based on ELF section name '%s'\n", name);
Taeung Songc76e4c22019-01-21 22:06:38 +09005038 type_names = libbpf_get_type_names(false);
5039 if (type_names != NULL) {
5040 pr_info("supported section(type) names are:%s\n", type_names);
5041 free(type_names);
5042 }
5043
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005044 return -ESRCH;
Jakub Kicinskib60df2a2018-07-10 14:42:59 -07005045}
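/*
 * Usage sketch: map an ELF section name to program and attach types; the
 * section name below is one of the entries in section_names[].
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *	int err;
 *
 *	err = libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *				       &attach_type);
 *	// on success: prog_type == BPF_PROG_TYPE_CGROUP_SOCK_ADDR and
 *	// attach_type == BPF_CGROUP_INET4_CONNECT
 */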
Roman Gushchin583c9002017-12-13 15:18:51 +00005046
Alexei Starovoitov12a86542019-10-30 15:32:12 -07005047#define BTF_PREFIX "btf_trace_"
5048static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id)
5049{
5050 struct btf *btf = bpf_core_find_kernel_btf();
5051 char raw_tp_btf_name[128] = BTF_PREFIX;
5052 char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1;
5053 int ret, i, err = -EINVAL;
5054
5055 if (IS_ERR(btf)) {
5056 pr_warn("vmlinux BTF is not found\n");
5057 return -EINVAL;
5058 }
5059
5060 if (!name)
5061 goto out;
5062
5063 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
5064 if (!section_names[i].is_attach_btf)
5065 continue;
5066 if (strncmp(name, section_names[i].sec, section_names[i].len))
5067 continue;
5068 /* prepend "btf_trace_" prefix per kernel convention */
5069 strncat(dst, name + section_names[i].len,
5070 sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX));
5071 ret = btf__find_by_name(btf, raw_tp_btf_name);
5072 if (ret <= 0) {
5073 pr_warn("%s is not found in vmlinux BTF\n", dst);
5074 goto out;
5075 }
5076 *btf_id = ret;
5077 err = 0;
5078 goto out;
5079 }
5080 pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
5081 err = -ESRCH;
5082out:
5083 btf__free(btf);
5084 return err;
5085}
5086
Andrey Ignatov956b6202018-09-26 15:24:53 -07005087int libbpf_attach_type_by_name(const char *name,
5088 enum bpf_attach_type *attach_type)
5089{
Taeung Songc76e4c22019-01-21 22:06:38 +09005090 char *type_names;
Andrey Ignatov956b6202018-09-26 15:24:53 -07005091 int i;
5092
5093 if (!name)
5094 return -EINVAL;
5095
5096 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
5097 if (strncmp(name, section_names[i].sec, section_names[i].len))
5098 continue;
Andrey Ignatov36153532018-10-31 12:57:18 -07005099 if (!section_names[i].is_attachable)
Andrey Ignatov956b6202018-09-26 15:24:53 -07005100 return -EINVAL;
5101 *attach_type = section_names[i].attach_type;
5102 return 0;
5103 }
Kefeng Wangbe180102019-10-21 13:55:32 +08005104 pr_warn("failed to guess attach type based on ELF section name '%s'\n", name);
Taeung Songc76e4c22019-01-21 22:06:38 +09005105 type_names = libbpf_get_type_names(true);
5106 if (type_names != NULL) {
5107 pr_info("attachable section(type) names are:%s\n", type_names);
5108 free(type_names);
5109 }
5110
Andrey Ignatov956b6202018-09-26 15:24:53 -07005111 return -EINVAL;
5112}
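/*
 * Usage sketch: derive an attach type from a section name and use it with
 * the bpf_prog_attach() wrapper from bpf.h; cgroup_fd is assumed to be an
 * open cgroup directory fd and prog_fd a loaded program fd.
 *
 *	enum bpf_attach_type attach_type;
 *	int err;
 *
 *	if (!libbpf_attach_type_by_name("cgroup_skb/ingress", &attach_type))
 *		err = bpf_prog_attach(prog_fd, cgroup_fd, attach_type, 0);
 */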
5113
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005114int bpf_map__fd(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00005115{
Arnaldo Carvalho de Melo6e009e652016-06-03 12:15:52 -03005116 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00005117}
5118
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005119const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00005120{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03005121 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00005122}
5123
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005124const char *bpf_map__name(const struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00005125{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03005126 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00005127}
5128
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07005129__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07005130{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07005131 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07005132}
5133
Martin KaFai Lau5b891af2018-07-24 08:40:21 -07005134__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07005135{
Martin KaFai Lau61746dbe2018-05-22 15:04:24 -07005136 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07005137}
5138
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03005139int bpf_map__set_priv(struct bpf_map *map, void *priv,
5140 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00005141{
5142 if (!map)
5143 return -EINVAL;
5144
5145 if (map->priv) {
5146 if (map->clear_priv)
5147 map->clear_priv(map, map->priv);
5148 }
5149
5150 map->priv = priv;
5151 map->clear_priv = clear_priv;
5152 return 0;
5153}
5154
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005155void *bpf_map__priv(const struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00005156{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03005157 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00005158}
5159
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005160bool bpf_map__is_offload_neutral(const struct bpf_map *map)
Jakub Kicinskif83fb222018-07-10 14:43:01 -07005161{
5162 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
5163}
5164
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005165bool bpf_map__is_internal(const struct bpf_map *map)
Daniel Borkmannd8599002019-04-09 23:20:13 +02005166{
5167 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
5168}
5169
Jakub Kicinski9aba3612018-06-28 14:41:37 -07005170void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
5171{
5172 map->map_ifindex = ifindex;
5173}
5174
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08005175int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
5176{
5177 if (!bpf_map_type__is_map_in_map(map->def.type)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005178 pr_warn("error: unsupported map type\n");
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08005179 return -EINVAL;
5180 }
5181 if (map->inner_map_fd != -1) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005182 pr_warn("error: inner_map_fd already specified\n");
Nikita V. Shirokovaddb9fc2018-11-20 20:55:56 -08005183 return -EINVAL;
5184 }
5185 map->inner_map_fd = fd;
5186 return 0;
5187}
5188
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005189static struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005190__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
Wang Nan9d759a92015-11-27 08:47:35 +00005191{
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005192 ssize_t idx;
Wang Nan9d759a92015-11-27 08:47:35 +00005193 struct bpf_map *s, *e;
5194
5195 if (!obj || !obj->maps)
5196 return NULL;
5197
5198 s = obj->maps;
5199 e = obj->maps + obj->nr_maps;
5200
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005201 if ((m < s) || (m >= e)) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005202		pr_warn("error in %s: map handle doesn't belong to object\n",
5203 __func__);
Wang Nan9d759a92015-11-27 08:47:35 +00005204 return NULL;
5205 }
5206
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005207 idx = (m - obj->maps) + i;
5208 if (idx >= obj->nr_maps || idx < 0)
Wang Nan9d759a92015-11-27 08:47:35 +00005209 return NULL;
5210 return &obj->maps[idx];
5211}
Wang Nan561bbcc2015-11-27 08:47:36 +00005212
5213struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005214bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005215{
5216 if (prev == NULL)
5217 return obj->maps;
5218
5219 return __bpf_map__iter(prev, obj, 1);
5220}
5221
5222struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005223bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
Stanislav Fomichev0c19a9f2018-11-09 08:21:41 -08005224{
5225 if (next == NULL) {
5226 if (!obj->nr_maps)
5227 return NULL;
5228 return obj->maps + obj->nr_maps - 1;
5229 }
5230
5231 return __bpf_map__iter(next, obj, -1);
5232}
5233
5234struct bpf_map *
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005235bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00005236{
5237 struct bpf_map *pos;
5238
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08005239 bpf_object__for_each_map(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00005240 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00005241 return pos;
5242 }
5243 return NULL;
5244}
Wang Nan5a6acad2016-11-26 07:03:27 +00005245
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01005246int
Andrii Nakryikoa324aae2019-06-17 15:48:58 -07005247bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
Maciej Fijalkowskif3cea322019-02-01 22:42:23 +01005248{
5249 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
5250}
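/*
 * Usage sketch: look up a map by name and update an element through the
 * bpf_map_update_elem() wrapper from bpf.h. The map name and key/value
 * types are hypothetical.
 *
 *	int map_fd = bpf_object__find_map_fd_by_name(obj, "my_counters");
 *	__u32 key = 0;
 *	__u64 value = 1;
 *	int err;
 *
 *	if (map_fd >= 0)
 *		err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 */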
5251
Wang Nan5a6acad2016-11-26 07:03:27 +00005252struct bpf_map *
5253bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
5254{
Andrii Nakryikodb488142019-06-17 12:26:54 -07005255 return ERR_PTR(-ENOTSUP);
Wang Nan5a6acad2016-11-26 07:03:27 +00005256}
Joe Stringere28ff1a2017-01-22 17:11:25 -08005257
5258long libbpf_get_error(const void *ptr)
5259{
Hariprasad Kelamd98363b2019-05-25 14:32:57 +05305260 return PTR_ERR_OR_ZERO(ptr);
Joe Stringere28ff1a2017-01-22 17:11:25 -08005261}
John Fastabend6f6d33f2017-08-15 22:34:22 -07005262
5263int bpf_prog_load(const char *file, enum bpf_prog_type type,
5264 struct bpf_object **pobj, int *prog_fd)
5265{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005266 struct bpf_prog_load_attr attr;
5267
5268 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
5269 attr.file = file;
5270 attr.prog_type = type;
5271 attr.expected_attach_type = 0;
5272
5273 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
5274}
5275
5276int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
5277 struct bpf_object **pobj, int *prog_fd)
5278{
Leo Yan33bae182019-07-02 18:25:31 +08005279 struct bpf_object_open_attr open_attr = {};
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005280 struct bpf_program *prog, *first_prog = NULL;
John Fastabend6f6d33f2017-08-15 22:34:22 -07005281 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07005282 struct bpf_map *map;
John Fastabend6f6d33f2017-08-15 22:34:22 -07005283 int err;
5284
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005285 if (!attr)
5286 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07005287 if (!attr->file)
5288 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07005289
Leo Yan33bae182019-07-02 18:25:31 +08005290 open_attr.file = attr->file;
5291 open_attr.prog_type = attr->prog_type;
5292
Jakub Kicinski07f2d4e2018-07-10 14:43:02 -07005293 obj = bpf_object__open_xattr(&open_attr);
Jakub Kicinski35976832018-05-10 10:09:34 -07005294 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07005295 return -ENOENT;
5296
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005297 bpf_object__for_each_program(prog, obj) {
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005298 enum bpf_attach_type attach_type = attr->expected_attach_type;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005299 /*
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005300 * to preserve backwards compatibility, bpf_prog_load treats
5301 * attr->prog_type, if specified, as an override to whatever
5302 * bpf_object__open guessed
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005303 */
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005304 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
5305 bpf_program__set_type(prog, attr->prog_type);
5306 bpf_program__set_expected_attach_type(prog,
5307 attach_type);
5308 }
5309 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
5310 /*
5311 * we haven't guessed from section name and user
5312 * didn't provide a fallback type, too bad...
5313 */
5314 bpf_object__close(obj);
5315 return -EINVAL;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005316 }
5317
Andrii Nakryikodd4436b2019-10-20 20:38:59 -07005318 prog->prog_ifindex = attr->ifindex;
Alexei Starovoitovda11b412019-04-01 21:27:47 -07005319 prog->log_level = attr->log_level;
Jiong Wang04656192019-05-24 23:25:19 +01005320 prog->prog_flags = attr->prog_flags;
Taeung Song69495d22018-09-03 08:30:07 +09005321 if (!first_prog)
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005322 first_prog = prog;
5323 }
5324
Jakub Kicinskif74a53d92019-02-27 19:04:12 -08005325 bpf_object__for_each_map(map, obj) {
Jakub Kicinskif83fb222018-07-10 14:43:01 -07005326 if (!bpf_map__is_offload_neutral(map))
5327 map->map_ifindex = attr->ifindex;
David Beckettf0307a72018-05-16 14:02:49 -07005328 }
5329
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005330 if (!first_prog) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005331 pr_warn("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07005332 bpf_object__close(obj);
5333 return -ENOENT;
5334 }
5335
John Fastabend6f6d33f2017-08-15 22:34:22 -07005336 err = bpf_object__load(obj);
5337 if (err) {
5338 bpf_object__close(obj);
5339 return -EINVAL;
5340 }
5341
5342 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08005343 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07005344 return 0;
5345}
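/*
 * Usage sketch: the one-call convenience path. The object file name is
 * hypothetical; prog_fd receives the fd of the first program in the object.
 *
 *	struct bpf_prog_load_attr load_attr = {
 *		.file = "xdp_prog.o",
 *		.prog_type = BPF_PROG_TYPE_XDP,
 *	};
 *	struct bpf_object *obj;
 *	int prog_fd, err;
 *
 *	err = bpf_prog_load_xattr(&load_attr, &obj, &prog_fd);
 */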
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07005346
Andrii Nakryiko1c2e9ef2019-07-01 16:58:56 -07005347struct bpf_link {
5348 int (*destroy)(struct bpf_link *link);
5349};
5350
5351int bpf_link__destroy(struct bpf_link *link)
5352{
5353 int err;
5354
5355 if (!link)
5356 return 0;
5357
5358 err = link->destroy(link);
5359 free(link);
5360
5361 return err;
5362}
5363
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07005364struct bpf_link_fd {
5365 struct bpf_link link; /* has to be at the top of struct */
5366 int fd; /* hook FD */
5367};
5368
5369static int bpf_link__destroy_perf_event(struct bpf_link *link)
5370{
5371 struct bpf_link_fd *l = (void *)link;
5372 int err;
5373
5374 err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
5375 if (err)
5376 err = -errno;
5377
5378 close(l->fd);
5379 return err;
5380}
5381
5382struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
5383 int pfd)
5384{
5385 char errmsg[STRERR_BUFSIZE];
5386 struct bpf_link_fd *link;
5387 int prog_fd, err;
5388
5389 if (pfd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005390 pr_warn("program '%s': invalid perf event FD %d\n",
5391 bpf_program__title(prog, false), pfd);
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07005392 return ERR_PTR(-EINVAL);
5393 }
5394 prog_fd = bpf_program__fd(prog);
5395 if (prog_fd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005396 pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
5397 bpf_program__title(prog, false));
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07005398 return ERR_PTR(-EINVAL);
5399 }
5400
5401 link = malloc(sizeof(*link));
5402 if (!link)
5403 return ERR_PTR(-ENOMEM);
5404 link->link.destroy = &bpf_link__destroy_perf_event;
5405 link->fd = pfd;
5406
5407 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
5408 err = -errno;
5409 free(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08005410 pr_warn("program '%s': failed to attach to pfd %d: %s\n",
5411 bpf_program__title(prog, false), pfd,
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07005412 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5413 return ERR_PTR(err);
5414 }
5415 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
5416 err = -errno;
5417 free(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08005418 pr_warn("program '%s': failed to enable pfd %d: %s\n",
5419 bpf_program__title(prog, false), pfd,
Andrii Nakryiko63f2f5e2019-07-01 16:58:57 -07005420 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5421 return ERR_PTR(err);
5422 }
5423 return (struct bpf_link *)link;
5424}
5425
Andrii Nakryikob2650022019-07-01 16:58:58 -07005426/*
5427 * this function is expected to parse integer in the range of [0, 2^31-1] from
5428 * given file using scanf format string fmt. If actual parsed value is
5429 * negative, the result might be indistinguishable from error
5430 */
5431static int parse_uint_from_file(const char *file, const char *fmt)
5432{
5433 char buf[STRERR_BUFSIZE];
5434 int err, ret;
5435 FILE *f;
5436
5437 f = fopen(file, "r");
5438 if (!f) {
5439 err = -errno;
5440 pr_debug("failed to open '%s': %s\n", file,
5441 libbpf_strerror_r(err, buf, sizeof(buf)));
5442 return err;
5443 }
5444 err = fscanf(f, fmt, &ret);
5445 if (err != 1) {
5446 err = err == EOF ? -EIO : -errno;
5447 pr_debug("failed to parse '%s': %s\n", file,
5448 libbpf_strerror_r(err, buf, sizeof(buf)));
5449 fclose(f);
5450 return err;
5451 }
5452 fclose(f);
5453 return ret;
5454}
5455
5456static int determine_kprobe_perf_type(void)
5457{
5458 const char *file = "/sys/bus/event_source/devices/kprobe/type";
5459
5460 return parse_uint_from_file(file, "%d\n");
5461}
5462
5463static int determine_uprobe_perf_type(void)
5464{
5465 const char *file = "/sys/bus/event_source/devices/uprobe/type";
5466
5467 return parse_uint_from_file(file, "%d\n");
5468}
5469
5470static int determine_kprobe_retprobe_bit(void)
5471{
5472 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
5473
5474 return parse_uint_from_file(file, "config:%d\n");
5475}
5476
5477static int determine_uprobe_retprobe_bit(void)
5478{
5479 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
5480
5481 return parse_uint_from_file(file, "config:%d\n");
5482}
5483
5484static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5485 uint64_t offset, int pid)
5486{
5487 struct perf_event_attr attr = {};
5488 char errmsg[STRERR_BUFSIZE];
5489 int type, pfd, err;
5490
5491 type = uprobe ? determine_uprobe_perf_type()
5492 : determine_kprobe_perf_type();
5493 if (type < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005494 pr_warn("failed to determine %s perf type: %s\n",
5495 uprobe ? "uprobe" : "kprobe",
5496 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07005497 return type;
5498 }
5499 if (retprobe) {
5500 int bit = uprobe ? determine_uprobe_retprobe_bit()
5501 : determine_kprobe_retprobe_bit();
5502
5503 if (bit < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005504 pr_warn("failed to determine %s retprobe bit: %s\n",
5505 uprobe ? "uprobe" : "kprobe",
5506 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07005507 return bit;
5508 }
5509 attr.config |= 1 << bit;
5510 }
5511 attr.size = sizeof(attr);
5512 attr.type = type;
Andrii Nakryiko36db2a92019-07-08 21:00:07 -07005513 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
5514 attr.config2 = offset; /* kprobe_addr or probe_offset */
Andrii Nakryikob2650022019-07-01 16:58:58 -07005515
5516 /* pid filter is meaningful only for uprobes */
5517 pfd = syscall(__NR_perf_event_open, &attr,
5518 pid < 0 ? -1 : pid /* pid */,
5519 pid == -1 ? 0 : -1 /* cpu */,
5520 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5521 if (pfd < 0) {
5522 err = -errno;
Kefeng Wangbe180102019-10-21 13:55:32 +08005523 pr_warn("%s perf_event_open() failed: %s\n",
5524 uprobe ? "uprobe" : "kprobe",
5525 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07005526 return err;
5527 }
5528 return pfd;
5529}
5530
5531struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
5532 bool retprobe,
5533 const char *func_name)
5534{
5535 char errmsg[STRERR_BUFSIZE];
5536 struct bpf_link *link;
5537 int pfd, err;
5538
5539 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
5540 0 /* offset */, -1 /* pid */);
5541 if (pfd < 0) {
Kefeng Wangbe180102019-10-21 13:55:32 +08005542 pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
5543 bpf_program__title(prog, false),
5544 retprobe ? "kretprobe" : "kprobe", func_name,
5545 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07005546 return ERR_PTR(pfd);
5547 }
5548 link = bpf_program__attach_perf_event(prog, pfd);
5549 if (IS_ERR(link)) {
5550 close(pfd);
5551 err = PTR_ERR(link);
Kefeng Wangbe180102019-10-21 13:55:32 +08005552 pr_warn("program '%s': failed to attach to %s '%s': %s\n",
5553 bpf_program__title(prog, false),
5554 retprobe ? "kretprobe" : "kprobe", func_name,
5555 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
Andrii Nakryikob2650022019-07-01 16:58:58 -07005556 return link;
5557 }
5558 return link;
5559}
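
/*
 * Illustrative usage sketch, not part of libbpf itself. The object file name,
 * program section title, and traced kernel function below are hypothetical;
 * error handling is reduced to early returns.
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	struct bpf_link *link;
 *
 *	obj = bpf_object__open("kprobe_example.o");
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj))
 *		return -1;
 *	prog = bpf_object__find_program_by_title(obj, "kprobe/do_sys_open");
 *	if (!prog)
 *		return -1;
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 *	bpf_object__close(obj);
 */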

struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
				    binary_path, func_offset, pid);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			bpf_program__title(prog, false),
			retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
			bpf_program__title(prog, false),
			retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
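
/*
 * Illustrative usage sketch, not part of libbpf. It assumes "prog" is a
 * loaded program intended for a uprobe; the binary path and function offset
 * below are hypothetical. pid -1 traces all processes, pid 0 traces only the
 * calling process, and a positive pid traces that one process (see the
 * pid/cpu handling in perf_event_open_probe() above).
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1,
 *					  "/usr/lib/libc.so.6", 0x8a90);
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 */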

static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file),
		       "/sys/kernel/debug/tracing/events/%s/%s/id",
		       tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
			bpf_program__title(prog, false),
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
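
/*
 * Illustrative usage sketch, not part of libbpf. It assumes "prog" is a
 * loaded tracepoint program; the category/name pair below corresponds to
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat and is only an
 * example.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 */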

static int bpf_link__destroy_fd(struct bpf_link *link)
{
	struct bpf_link_fd *l = (void *)link;

	return close(l->fd);
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_fd *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("program '%s': can't attach before loaded\n",
			bpf_program__title(prog, false));
		return ERR_PTR(-EINVAL);
	}

	link = malloc(sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->link.destroy = &bpf_link__destroy_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
			bpf_program__title(prog, false), tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}

struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt;
	int epoll_fd; /* epoll instance FD used by perf_buffer__poll() */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (!pb)
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = { 0, };

	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	struct bpf_map_info map = {};
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	__u32 map_info_len;
	int err, i;

	if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
		return ERR_PTR(err);
	}

	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			map.name);
		return ERR_PTR(-EINVAL);
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[i] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[i].events = EPOLLIN;
		pb->events[i].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[i]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
	}

	return pb;

error:
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[0];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}
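
/*
 * Illustrative consumer sketch, not part of libbpf. It assumes a loaded
 * bpf_object "obj" containing a BPF_MAP_TYPE_PERF_EVENT_ARRAY map named
 * "events" (both names are hypothetical) and a BPF program that emits
 * samples into it via bpf_perf_event_output(); handle_event() stands in for
 * application logic. page_cnt must be a power of two, per __perf_buffer__new()
 * above; 8 pages per CPU is used here.
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		handle_event(data, size);
 *	}
 *
 *	struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
 *	struct perf_buffer *pb;
 *	int map_fd, err;
 *
 *	map_fd = bpf_object__find_map_fd_by_name(obj, "events");
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *	err = libbpf_get_error(pb);
 *	if (err)
 *		return err;
 *	while ((err = perf_buffer__poll(pb, 100)) >= 0)
 *		;
 *	perf_buffer__free(pb);
 */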

struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate contiguous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
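
/*
 * Illustrative usage sketch, not part of libbpf: fetch the map IDs used by an
 * already loaded program whose FD is prog_fd (a hypothetical variable). The
 * arrays to fetch are selected by bit mask; array pointers in the returned
 * bpf_prog_info point into info_linear->data, and the whole allocation is
 * released with a single free().
 *
 *	struct bpf_prog_info_linear *info_linear;
 *	__u32 *map_ids, i;
 *
 *	info_linear = bpf_program__get_prog_info_linear(
 *			prog_fd, 1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (libbpf_get_error(info_linear))
 *		return -1;
 *	map_ids = (__u32 *)(uintptr_t)info_linear->info.map_ids;
 *	for (i = 0; i < info_linear->info.nr_map_ids; i++)
 *		printf("map id: %u\n", map_ids[i]);
 *	free(info_linear);
 */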

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	int len = 0, n = 0, il = 0, ir = 0;
	unsigned int start = 0, end = 0;
	int tmp_cpus = 0;
	static int cpus;
	char buf[128];
	int error = 0;
	int fd = -1;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		error = errno;
		pr_warn("Failed to open file %s: %s\n", fcpu, strerror(error));
		return -error;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		error = len ? errno : EINVAL;
		pr_warn("Failed to read # of possible cpus from %s: %s\n",
			fcpu, strerror(error));
		return -error;
	}
	if (len == sizeof(buf)) {
		pr_warn("File %s size overflow\n", fcpu);
		return -EOVERFLOW;
	}
	buf[len] = '\0';

	for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
		/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
		if (buf[ir] == ',' || buf[ir] == '\0') {
			buf[ir] = '\0';
			n = sscanf(&buf[il], "%u-%u", &start, &end);
			if (n <= 0) {
				pr_warn("Failed to get # CPUs from %s\n",
					&buf[il]);
				return -EINVAL;
			} else if (n == 1) {
				end = start;
			}
			tmp_cpus += end - start + 1;
			il = ir + 1;
		}
	}
	if (tmp_cpus <= 0) {
		pr_warn("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
		return -EINVAL;
	}

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
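
/*
 * Illustrative usage sketch, not part of libbpf: sizing the value buffer for
 * a per-CPU map lookup, which needs one value slot per possible CPU. The map
 * FD variable and the 8-byte value size below are hypothetical.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *values;
 *	__u32 key = 0;
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, sizeof(*values));
 *	if (!values)
 *		return -ENOMEM;
 *	if (bpf_map_lookup_elem(percpu_map_fd, &key, values))
 *		return -errno;
 *	...
 *	free(values);
 */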