// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"

#include <internal/xyarray.h>

static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

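/*
 * Per-program private data attached to each struct bpf_program while it is
 * being configured: either the tracepoint identity (sys_name/evt_name when
 * is_tp is set) or the perf probe event parsed from the program's section
 * name, plus the prologue bookkeeping (insns_buf, nr_types, type_mapping)
 * used when argument-fetching instructions must be prepended at load time.
 */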
struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}

void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

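/*
 * Recognized "key=value" terms that may prefix a program's section name.
 * Each entry maps a key to the handler that applies it to the
 * perf_probe_event being built; usage/desc are only printed as hints when
 * an unknown term is seen.
 */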
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

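/*
 * A program's section name looks like "key=value;key=value;<main part>":
 * parse_prog_config_kvpair() consumes the leading ';'-separated "key=value"
 * terms through do_prog_config() and returns a pointer to the remaining
 * main part (a probe definition or tracepoint name), or an ERR_PTR() on
 * failure.
 */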
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing setting */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so that if init fails the first time,
	 * bpf__prepare_probe() fails each subsequent time without calling
	 * init_probe_symbol_maps() multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

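/*
 * Preprocessor callback passed to bpf_program__set_prep(): for prologue
 * type 'n' it finds a matching probe_trace_event, generates the
 * argument-fetching prologue into priv->insns_buf and returns the prologue
 * plus the original instructions as the program variant to load.
 */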
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args() is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}

/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as the tevs in that pev.
 * nr_types will be set to the number of types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

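/*
 * Decide whether a program needs a prologue at all: tracepoint programs and
 * probes without arguments are left untouched, otherwise allocate the
 * instruction buffer and type mapping and register preproc_gen_prologue()
 * through bpf_program__set_prep().
 */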
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since none of the tevs has arguments, we don't need to generate
	 * a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

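/*
 * Walk every program in the object, parse its section name, and either mark
 * it as a tracepoint program or create/apply the corresponding k/uprobes
 * and hook the prologue preprocessor for it.
 */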
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider the prologue, which
		 * adds an argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor
		 * to the bpf_program, letting it generate the prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE  4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];
		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint call back failed, stop iterate\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: call back failed, stop iterate\n");
				return err;
			}
		}
	}
	return 0;
}

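/*
 * Map configuration ("value=..." and "event=...") is not applied
 * immediately: each request is recorded as a struct bpf_map_op on the map's
 * private ops_list and replayed by bpf__apply_obj_config() once the maps
 * have file descriptors, i.e. after the object has been loaded.
 */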
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	struct evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	free(map_name);
	if (!err)
		key_scan_pos += strlen(map_opt);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;

}

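/*
 * Helpers for replaying recorded map ops: a map_config_func_t is invoked
 * once per key selected by the op, either for every index of the array map
 * (BPF_MAP_KEY_ALL) or only for the index ranges recorded with the op
 * (BPF_MAP_KEY_RANGES).
 */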
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_object__for_each_map(pos, obj)

#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
	bpf__for_each_map(pos, obj, objtmp)	\
		if (bpf_map__name(pos) &&	\
			(strcmp(name,	\
				bpf_map__name(pos)) == 0))

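/*
 * Make sure every map called 'name' in any loaded object has an evsel to
 * write to: reuse the settings already attached to one of them if possible,
 * otherwise create a "bpf-output/no-inherit=1,name=<name>/" event and bind
 * it to the remaining maps.
 */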
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check the map type: it should have been
		 * verified by the kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}

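/*
 * A minimal sketch of the caller side (not copied from any perf builtin;
 * "my_output" is just an example map name): pass the map name used by
 * the scriptlet and report failures through the matching strerror
 * helper further below.
 *
 *	struct evsel *evsel = bpf__setup_output_event(evlist, "my_output");
 *
 *	if (IS_ERR(evsel)) {
 *		char errbuf[BUFSIZ];
 *
 *		bpf__strerror_setup_output_event(evlist, PTR_ERR(evsel),
 *						 errbuf, sizeof(errbuf));
 *		pr_err("ERROR: Setup BPF output event failed: %s\n", errbuf);
 *	}
 *
 * bpf__setup_stdout() below is the convenience wrapper for the default
 * "__bpf_stdout__" map name; it folds the ERR_PTR() convention back
 * into a plain negative errno return.
 */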
int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}

#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]		= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]			= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]		= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]		= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]		= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]		= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]		= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]		= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]		= "Offset out of bounds for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]		= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]		= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

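/*
 * Translate any of the error conventions used here into a message:
 * codes in libbpf's range are handed to libbpf_strerror(), codes in the
 * loader's own range are looked up in the table above, and anything
 * else is treated as a plain errno via str_error_r().
 */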
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

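/*
 * The bpf__strerror_*() helpers below are built from these three
 * macros: _head opens a switch on the (absolute) error code with a
 * default case that copies the generic message, _entry adds one case
 * with a more specific hint, and _end closes the switch and terminates
 * the buffer.  Roughly, a body such as
 *
 *	bpf__strerror_head(err, buf, size);
 *	bpf__strerror_entry(EACCES, "You need to be root");
 *	bpf__strerror_end(buf, size);
 *
 * expands to a switch (err) statement with an EACCES case and the
 * generic default.
 */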
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

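/*
 * Prefix the message with the file (or source buffer) that failed to
 * load, then let bpf_loader_strerror() fill in the remaining space.
 */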
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

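/*
 * Probe registration failures mostly come back as plain errnos; the
 * entries below turn the common ones into actionable hints.
 */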
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see details)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point already exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check the probing points in the BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

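/*
 * LIBBPF_ERRNO__KVER signals a kernel version check failure, so compare
 * the object's 'version' section with the running kernel to produce a
 * precise message.
 */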
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to a BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output events into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}
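/*
 * A sketch of the usual caller pattern for these helpers (the exact
 * surrounding message is up to the caller):
 *
 *	err = bpf__apply_obj_config();
 *	if (err) {
 *		char errbuf[BUFSIZ];
 *
 *		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
 *		pr_err("ERROR: Apply config to BPF failed: %s\n", errbuf);
 *	}
 */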

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}