// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "map.h"
#include "map_groups.h"
#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include "debug.h"
#include "util.h"
#include <linux/ctype.h>
#include <symbol/kallsyms.h>

#ifndef EM_AARCH64
#define EM_AARCH64 183  /* ARM 64 bit */
#endif

#ifndef ELF32_ST_VISIBILITY
#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
#endif

/* For ELF64 the definitions are the same. */
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
#endif

/* How to extract information held in the st_other field. */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
#endif

typedef Elf64_Nhdr GElf_Nhdr;

#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif
61
Ingo Molnar89fe8082013-09-30 12:07:11 +020062#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
Arnaldo Carvalho de Melo179f36d2015-09-17 11:30:20 -030063static int elf_getphdrnum(Elf *elf, size_t *dst)
Adrian Huntere955d5c2013-09-13 16:49:30 +030064{
65 GElf_Ehdr gehdr;
66 GElf_Ehdr *ehdr;
67
68 ehdr = gelf_getehdr(elf, &gehdr);
69 if (!ehdr)
70 return -1;
71
72 *dst = ehdr->e_phnum;
73
74 return 0;
75}
76#endif
77
Arnaldo Carvalho de Melo2492c462016-07-04 19:35:47 -030078#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
79static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
80{
81 pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
82 return -1;
83}
84#endif
85
Namhyung Kime5a18452012-08-06 13:41:20 +090086#ifndef NT_GNU_BUILD_ID
87#define NT_GNU_BUILD_ID 3
88#endif
89
/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
97#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
98 for (idx = 0, gelf_getsym(syms, idx, &sym);\
99 idx < nr_syms; \
100 idx++, gelf_getsym(syms, idx, &sym))
101
102static inline uint8_t elf_sym__type(const GElf_Sym *sym)
103{
104 return GELF_ST_TYPE(sym->st_info);
105}
106
Jiri Olsa59a17702019-01-28 14:35:26 +0100107static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
108{
109 return GELF_ST_VISIBILITY(sym->st_other);
110}
111
Vinson Lee4e310502015-02-09 16:29:37 -0800112#ifndef STT_GNU_IFUNC
113#define STT_GNU_IFUNC 10
114#endif
115
Namhyung Kime5a18452012-08-06 13:41:20 +0900116static inline int elf_sym__is_function(const GElf_Sym *sym)
117{
Adrian Huntera2f3b6b2014-07-14 13:02:33 +0300118 return (elf_sym__type(sym) == STT_FUNC ||
119 elf_sym__type(sym) == STT_GNU_IFUNC) &&
Namhyung Kime5a18452012-08-06 13:41:20 +0900120 sym->st_name != 0 &&
121 sym->st_shndx != SHN_UNDEF;
122}
123
124static inline bool elf_sym__is_object(const GElf_Sym *sym)
125{
126 return elf_sym__type(sym) == STT_OBJECT &&
127 sym->st_name != 0 &&
128 sym->st_shndx != SHN_UNDEF;
129}
130
131static inline int elf_sym__is_label(const GElf_Sym *sym)
132{
133 return elf_sym__type(sym) == STT_NOTYPE &&
134 sym->st_name != 0 &&
135 sym->st_shndx != SHN_UNDEF &&
Jiri Olsa59a17702019-01-28 14:35:26 +0100136 sym->st_shndx != SHN_ABS &&
137 elf_sym__visibility(sym) != STV_HIDDEN &&
138 elf_sym__visibility(sym) != STV_INTERNAL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900139}
140
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300141static bool elf_sym__filter(GElf_Sym *sym)
Namhyung Kime5a18452012-08-06 13:41:20 +0900142{
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300143 return elf_sym__is_function(sym) || elf_sym__is_object(sym);
Namhyung Kime5a18452012-08-06 13:41:20 +0900144}
145
146static inline const char *elf_sym__name(const GElf_Sym *sym,
147 const Elf_Data *symstrs)
148{
149 return symstrs->d_buf + sym->st_name;
150}
151
152static inline const char *elf_sec__name(const GElf_Shdr *shdr,
153 const Elf_Data *secstrs)
154{
155 return secstrs->d_buf + shdr->sh_name;
156}
157
158static inline int elf_sec__is_text(const GElf_Shdr *shdr,
159 const Elf_Data *secstrs)
160{
161 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
162}
163
164static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
165 const Elf_Data *secstrs)
166{
167 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
168}
169
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300170static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
Namhyung Kime5a18452012-08-06 13:41:20 +0900171{
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300172 return elf_sec__is_text(shdr, secstrs) ||
173 elf_sec__is_data(shdr, secstrs);
Namhyung Kime5a18452012-08-06 13:41:20 +0900174}
175
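/*
 * Return the index of the section containing @addr, or -1 (as a size_t) if
 * none does.  Counting starts at 1 because elf_nextscn() skips the reserved
 * null section at index 0.
 */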
176static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
177{
178 Elf_Scn *sec = NULL;
179 GElf_Shdr shdr;
180 size_t cnt = 1;
181
182 while ((sec = elf_nextscn(elf, sec)) != NULL) {
183 gelf_getshdr(sec, &shdr);
184
185 if ((addr >= shdr.sh_addr) &&
186 (addr < (shdr.sh_addr + shdr.sh_size)))
187 return cnt;
188
189 ++cnt;
190 }
191
192 return -1;
193}
194
Masami Hiramatsu99ca4232014-01-16 09:39:49 +0000195Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
196 GElf_Shdr *shp, const char *name, size_t *idx)
Namhyung Kime5a18452012-08-06 13:41:20 +0900197{
198 Elf_Scn *sec = NULL;
199 size_t cnt = 1;
200
Cody P Schafer49274652012-08-10 15:22:55 -0700201 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
202 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
203 return NULL;
204
Namhyung Kime5a18452012-08-06 13:41:20 +0900205 while ((sec = elf_nextscn(elf, sec)) != NULL) {
206 char *str;
207
208 gelf_getshdr(sec, shp);
209 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
Jiri Olsa155b3a12014-03-02 14:32:07 +0100210 if (str && !strcmp(name, str)) {
Namhyung Kime5a18452012-08-06 13:41:20 +0900211 if (idx)
212 *idx = cnt;
Jiri Olsa155b3a12014-03-02 14:32:07 +0100213 return sec;
Namhyung Kime5a18452012-08-06 13:41:20 +0900214 }
215 ++cnt;
216 }
217
Jiri Olsa155b3a12014-03-02 14:32:07 +0100218 return NULL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900219}
220
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200221static bool want_demangle(bool is_kernel_sym)
222{
223 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
224}
225
226static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
227{
Namhyung Kimbb963e12017-02-17 17:17:38 +0900228 int demangle_flags = verbose > 0 ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS;
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200229 char *demangled = NULL;
230
231 /*
232 * We need to figure out if the object was created from C++ sources
233 * DWARF DW_compile_unit has this, but we don't always have access
234 * to it...
235 */
236 if (!want_demangle(dso->kernel || kmodule))
237 return demangled;
238
239 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
240 if (demangled == NULL)
241 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
242 else if (rust_is_mangled(demangled))
243 /*
244 * Input to Rust demangling is the BFD-demangled
245 * name which it Rust-demangles in place.
246 */
247 rust_demangle_sym(demangled);
248
249 return demangled;
250}
251
Namhyung Kime5a18452012-08-06 13:41:20 +0900252#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
253 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
254 idx < nr_entries; \
255 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
256
257#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
258 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
259 idx < nr_entries; \
260 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
261
262/*
263 * We need to check if we have a .dynsym, so that we can handle the
264 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
265 * .dynsym or .symtab).
266 * And always look at the original dso, not at debuginfo packages, that
267 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
268 */
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300269int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
Namhyung Kime5a18452012-08-06 13:41:20 +0900270{
271 uint32_t nr_rel_entries, idx;
272 GElf_Sym sym;
Li Binb2f76052017-06-05 08:34:09 +0800273 u64 plt_offset, plt_header_size, plt_entry_size;
Namhyung Kime5a18452012-08-06 13:41:20 +0900274 GElf_Shdr shdr_plt;
275 struct symbol *f;
276 GElf_Shdr shdr_rel_plt, shdr_dynsym;
277 Elf_Data *reldata, *syms, *symstrs;
278 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
279 size_t dynsym_idx;
280 GElf_Ehdr ehdr;
281 char sympltname[1024];
282 Elf *elf;
Cody P Schafera44f6052012-08-10 15:22:59 -0700283 int nr = 0, symidx, err = 0;
Namhyung Kime5a18452012-08-06 13:41:20 +0900284
David Ahernf47b58b2012-08-19 09:47:14 -0600285 if (!ss->dynsym)
286 return 0;
287
Cody P Schafera44f6052012-08-10 15:22:59 -0700288 elf = ss->elf;
289 ehdr = ss->ehdr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900290
Cody P Schafera44f6052012-08-10 15:22:59 -0700291 scn_dynsym = ss->dynsym;
292 shdr_dynsym = ss->dynshdr;
293 dynsym_idx = ss->dynsym_idx;
Namhyung Kime5a18452012-08-06 13:41:20 +0900294
Namhyung Kime5a18452012-08-06 13:41:20 +0900295 if (scn_dynsym == NULL)
296 goto out_elf_end;
297
298 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
299 ".rela.plt", NULL);
300 if (scn_plt_rel == NULL) {
301 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
302 ".rel.plt", NULL);
303 if (scn_plt_rel == NULL)
304 goto out_elf_end;
305 }
306
307 err = -1;
308
309 if (shdr_rel_plt.sh_link != dynsym_idx)
310 goto out_elf_end;
311
312 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
313 goto out_elf_end;
314
315 /*
316 * Fetch the relocation section to find the idxes to the GOT
317 * and the symbols in the .dynsym they refer to.
318 */
319 reldata = elf_getdata(scn_plt_rel, NULL);
320 if (reldata == NULL)
321 goto out_elf_end;
322
323 syms = elf_getdata(scn_dynsym, NULL);
324 if (syms == NULL)
325 goto out_elf_end;
326
327 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
328 if (scn_symstrs == NULL)
329 goto out_elf_end;
330
331 symstrs = elf_getdata(scn_symstrs, NULL);
332 if (symstrs == NULL)
333 goto out_elf_end;
334
Cody P Schafer52f9ddb2012-08-10 15:22:51 -0700335 if (symstrs->d_size == 0)
336 goto out_elf_end;
337
Namhyung Kime5a18452012-08-06 13:41:20 +0900338 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
339 plt_offset = shdr_plt.sh_offset;
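	/*
	 * The first PLT slot is a reserved header that has no corresponding
	 * relocation entry, so skip it; every slot after it maps 1:1 onto an
	 * entry in .rel.plt/.rela.plt.  Header and entry sizes are
	 * architecture specific.
	 */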
Li Binb2f76052017-06-05 08:34:09 +0800340 switch (ehdr.e_machine) {
341 case EM_ARM:
342 plt_header_size = 20;
343 plt_entry_size = 12;
344 break;
345
346 case EM_AARCH64:
347 plt_header_size = 32;
348 plt_entry_size = 16;
349 break;
350
David Millerd6afa562018-10-17 12:08:59 -0700351 case EM_SPARC:
352 plt_header_size = 48;
353 plt_entry_size = 12;
354 break;
355
356 case EM_SPARCV9:
357 plt_header_size = 128;
358 plt_entry_size = 32;
359 break;
360
	default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
Li Binb2f76052017-06-05 08:34:09 +0800362 plt_header_size = shdr_plt.sh_entsize;
363 plt_entry_size = shdr_plt.sh_entsize;
364 break;
365 }
366 plt_offset += plt_header_size;
Namhyung Kime5a18452012-08-06 13:41:20 +0900367
368 if (shdr_rel_plt.sh_type == SHT_RELA) {
369 GElf_Rela pos_mem, *pos;
370
371 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
372 nr_rel_entries) {
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200373 const char *elf_name = NULL;
374 char *demangled = NULL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900375 symidx = GELF_R_SYM(pos->r_info);
Namhyung Kime5a18452012-08-06 13:41:20 +0900376 gelf_getsym(syms, symidx, &sym);
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200377
378 elf_name = elf_sym__name(&sym, symstrs);
379 demangled = demangle_sym(dso, 0, elf_name);
380 if (demangled != NULL)
381 elf_name = demangled;
Namhyung Kime5a18452012-08-06 13:41:20 +0900382 snprintf(sympltname, sizeof(sympltname),
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200383 "%s@plt", elf_name);
384 free(demangled);
Namhyung Kime5a18452012-08-06 13:41:20 +0900385
Li Binb2f76052017-06-05 08:34:09 +0800386 f = symbol__new(plt_offset, plt_entry_size,
Arnaldo Carvalho de Meloaf30bff2018-04-26 11:09:10 -0300387 STB_GLOBAL, STT_FUNC, sympltname);
Namhyung Kime5a18452012-08-06 13:41:20 +0900388 if (!f)
389 goto out_elf_end;
390
Li Binb2f76052017-06-05 08:34:09 +0800391 plt_offset += plt_entry_size;
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300392 symbols__insert(&dso->symbols, f);
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -0300393 ++nr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900394 }
395 } else if (shdr_rel_plt.sh_type == SHT_REL) {
396 GElf_Rel pos_mem, *pos;
397 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
398 nr_rel_entries) {
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200399 const char *elf_name = NULL;
400 char *demangled = NULL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900401 symidx = GELF_R_SYM(pos->r_info);
Namhyung Kime5a18452012-08-06 13:41:20 +0900402 gelf_getsym(syms, symidx, &sym);
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200403
404 elf_name = elf_sym__name(&sym, symstrs);
405 demangled = demangle_sym(dso, 0, elf_name);
406 if (demangled != NULL)
407 elf_name = demangled;
Namhyung Kime5a18452012-08-06 13:41:20 +0900408 snprintf(sympltname, sizeof(sympltname),
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200409 "%s@plt", elf_name);
410 free(demangled);
Namhyung Kime5a18452012-08-06 13:41:20 +0900411
Li Binb2f76052017-06-05 08:34:09 +0800412 f = symbol__new(plt_offset, plt_entry_size,
Arnaldo Carvalho de Meloaf30bff2018-04-26 11:09:10 -0300413 STB_GLOBAL, STT_FUNC, sympltname);
Namhyung Kime5a18452012-08-06 13:41:20 +0900414 if (!f)
415 goto out_elf_end;
416
Li Binb2f76052017-06-05 08:34:09 +0800417 plt_offset += plt_entry_size;
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300418 symbols__insert(&dso->symbols, f);
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -0300419 ++nr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900420 }
421 }
422
423 err = 0;
424out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +0900425 if (err == 0)
426 return nr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900427 pr_debug("%s: problems reading %s PLT info.\n",
428 __func__, dso->long_name);
429 return 0;
430}
431
Milian Wolff80c345b2017-08-06 23:24:34 +0200432char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
Jin Yaoa64489c2017-03-26 04:34:26 +0800433{
434 return demangle_sym(dso, kmodule, elf_name);
435}
436
Namhyung Kime5a18452012-08-06 13:41:20 +0900437/*
438 * Align offset to 4 bytes as needed for note name and descriptor data.
439 */
440#define NOTE_ALIGN(n) (((n) + 3) & -4U)
441
442static int elf_read_build_id(Elf *elf, void *bf, size_t size)
443{
444 int err = -1;
445 GElf_Ehdr ehdr;
446 GElf_Shdr shdr;
447 Elf_Data *data;
448 Elf_Scn *sec;
449 Elf_Kind ek;
450 void *ptr;
451
452 if (size < BUILD_ID_SIZE)
453 goto out;
454
455 ek = elf_kind(elf);
456 if (ek != ELF_K_ELF)
457 goto out;
458
459 if (gelf_getehdr(elf, &ehdr) == NULL) {
460 pr_err("%s: cannot get elf header.\n", __func__);
461 goto out;
462 }
463
464 /*
465 * Check following sections for notes:
466 * '.note.gnu.build-id'
467 * '.notes'
468 * '.note' (VDSO specific)
469 */
470 do {
471 sec = elf_section_by_name(elf, &ehdr, &shdr,
472 ".note.gnu.build-id", NULL);
473 if (sec)
474 break;
475
476 sec = elf_section_by_name(elf, &ehdr, &shdr,
477 ".notes", NULL);
478 if (sec)
479 break;
480
481 sec = elf_section_by_name(elf, &ehdr, &shdr,
482 ".note", NULL);
483 if (sec)
484 break;
485
486 return err;
487
488 } while (0);
489
490 data = elf_getdata(sec, NULL);
491 if (data == NULL)
492 goto out;
493
494 ptr = data->d_buf;
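	/*
	 * Walk the note section: each note is a GElf_Nhdr followed by its
	 * name and then its descriptor, each padded to a 4-byte boundary
	 * (hence NOTE_ALIGN above).  The build id is the descriptor of the
	 * "GNU" note of type NT_GNU_BUILD_ID.
	 */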
495 while (ptr < (data->d_buf + data->d_size)) {
496 GElf_Nhdr *nhdr = ptr;
497 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
498 descsz = NOTE_ALIGN(nhdr->n_descsz);
499 const char *name;
500
501 ptr += sizeof(*nhdr);
502 name = ptr;
503 ptr += namesz;
504 if (nhdr->n_type == NT_GNU_BUILD_ID &&
505 nhdr->n_namesz == sizeof("GNU")) {
506 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
507 size_t sz = min(size, descsz);
508 memcpy(bf, ptr, sz);
509 memset(bf + sz, 0, size - sz);
510 err = descsz;
511 break;
512 }
513 }
514 ptr += descsz;
515 }
516
517out:
518 return err;
519}
520
521int filename__read_build_id(const char *filename, void *bf, size_t size)
522{
523 int fd, err = -1;
524 Elf *elf;
525
526 if (size < BUILD_ID_SIZE)
527 goto out;
528
529 fd = open(filename, O_RDONLY);
530 if (fd < 0)
531 goto out;
532
533 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
534 if (elf == NULL) {
535 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
536 goto out_close;
537 }
538
539 err = elf_read_build_id(elf, bf, size);
540
541 elf_end(elf);
542out_close:
543 close(fd);
544out:
545 return err;
546}
547
548int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
549{
550 int fd, err = -1;
551
552 if (size < BUILD_ID_SIZE)
553 goto out;
554
555 fd = open(filename, O_RDONLY);
556 if (fd < 0)
557 goto out;
558
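	/*
	 * sysfs exposes the notes as a raw stream, so parse it by hand: read
	 * each note header, then look for the "GNU" note of type
	 * NT_GNU_BUILD_ID and copy its descriptor into @build_id.
	 */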
559 while (1) {
560 char bf[BUFSIZ];
561 GElf_Nhdr nhdr;
562 size_t namesz, descsz;
563
564 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
565 break;
566
567 namesz = NOTE_ALIGN(nhdr.n_namesz);
568 descsz = NOTE_ALIGN(nhdr.n_descsz);
569 if (nhdr.n_type == NT_GNU_BUILD_ID &&
570 nhdr.n_namesz == sizeof("GNU")) {
571 if (read(fd, bf, namesz) != (ssize_t)namesz)
572 break;
573 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
574 size_t sz = min(descsz, size);
575 if (read(fd, build_id, sz) == (ssize_t)sz) {
576 memset(build_id + sz, 0, size - sz);
577 err = 0;
578 break;
579 }
580 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
581 break;
582 } else {
583 int n = namesz + descsz;
Arnaldo Carvalho de Melo7934c982017-01-03 15:19:21 -0300584
585 if (n > (int)sizeof(bf)) {
586 n = sizeof(bf);
587 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
588 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
589 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900590 if (read(fd, bf, n) != n)
591 break;
592 }
593 }
594 close(fd);
595out:
596 return err;
597}
598
599int filename__read_debuglink(const char *filename, char *debuglink,
600 size_t size)
601{
602 int fd, err = -1;
603 Elf *elf;
604 GElf_Ehdr ehdr;
605 GElf_Shdr shdr;
606 Elf_Data *data;
607 Elf_Scn *sec;
608 Elf_Kind ek;
609
610 fd = open(filename, O_RDONLY);
611 if (fd < 0)
612 goto out;
613
614 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
615 if (elf == NULL) {
616 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
617 goto out_close;
618 }
619
620 ek = elf_kind(elf);
621 if (ek != ELF_K_ELF)
Chenggang Qin784f3392013-10-11 08:27:57 +0800622 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900623
624 if (gelf_getehdr(elf, &ehdr) == NULL) {
625 pr_err("%s: cannot get elf header.\n", __func__);
Chenggang Qin784f3392013-10-11 08:27:57 +0800626 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900627 }
628
629 sec = elf_section_by_name(elf, &ehdr, &shdr,
630 ".gnu_debuglink", NULL);
631 if (sec == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800632 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900633
634 data = elf_getdata(sec, NULL);
635 if (data == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800636 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900637
638 /* the start of this section is a zero-terminated string */
639 strncpy(debuglink, data->d_buf, size);
640
Stephane Eranian0d3dc5e2014-02-20 10:32:55 +0900641 err = 0;
642
Chenggang Qin784f3392013-10-11 08:27:57 +0800643out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +0900644 elf_end(elf);
Namhyung Kime5a18452012-08-06 13:41:20 +0900645out_close:
646 close(fd);
647out:
648 return err;
649}
650
651static int dso__swap_init(struct dso *dso, unsigned char eidata)
652{
653 static unsigned int const endian = 1;
654
655 dso->needs_swap = DSO_SWAP__NO;
656
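	/*
	 * The first byte of the constant 1 is 1 on a little-endian host and
	 * 0 on a big-endian one, so comparing it against the DSO's
	 * ELFDATA2LSB/ELFDATA2MSB encoding tells us whether data read from
	 * this DSO needs byte-swapping.
	 */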
657 switch (eidata) {
658 case ELFDATA2LSB:
659 /* We are big endian, DSO is little endian. */
660 if (*(unsigned char const *)&endian != 1)
661 dso->needs_swap = DSO_SWAP__YES;
662 break;
663
664 case ELFDATA2MSB:
665 /* We are little endian, DSO is big endian. */
666 if (*(unsigned char const *)&endian != 0)
667 dso->needs_swap = DSO_SWAP__YES;
668 break;
669
670 default:
671 pr_err("unrecognized DSO data encoding %d\n", eidata);
672 return -EINVAL;
673 }
674
675 return 0;
676}
677
Cody P Schafer3aafe5a2012-08-10 15:23:02 -0700678bool symsrc__possibly_runtime(struct symsrc *ss)
679{
680 return ss->dynsym || ss->opdsec;
681}
682
Cody P Schaferd26cd122012-08-10 15:23:00 -0700683bool symsrc__has_symtab(struct symsrc *ss)
684{
685 return ss->symtab != NULL;
686}
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700687
688void symsrc__destroy(struct symsrc *ss)
Namhyung Kime5a18452012-08-06 13:41:20 +0900689{
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -0300690 zfree(&ss->name);
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700691 elf_end(ss->elf);
692 close(ss->fd);
693}
694
Naveen N. Raod2332092015-04-28 17:35:35 +0530695bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
696{
697 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
698}
699
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700700int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
701 enum dso_binary_type type)
702{
Namhyung Kime5a18452012-08-06 13:41:20 +0900703 GElf_Ehdr ehdr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900704 Elf *elf;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700705 int fd;
706
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300707 if (dso__needs_decompress(dso)) {
Namhyung Kim42b3fa62017-06-08 16:31:03 +0900708 fd = dso__decompress_kmodule_fd(dso, name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300709 if (fd < 0)
710 return -1;
Namhyung Kimc25ec422017-06-08 16:31:08 +0900711
712 type = dso->symtab_type;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300713 } else {
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900714 fd = open(name, O_RDONLY);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300715 if (fd < 0) {
716 dso->load_errno = errno;
717 return -1;
718 }
719 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900720
721 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
722 if (elf == NULL) {
723 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300724 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
Namhyung Kime5a18452012-08-06 13:41:20 +0900725 goto out_close;
726 }
727
728 if (gelf_getehdr(elf, &ehdr) == NULL) {
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300729 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
Namhyung Kime5a18452012-08-06 13:41:20 +0900730 pr_debug("%s: cannot get elf header.\n", __func__);
731 goto out_elf_end;
732 }
733
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300734 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
735 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
Namhyung Kime5a18452012-08-06 13:41:20 +0900736 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300737 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900738
739 /* Always reject images with a mismatched build-id: */
Masami Hiramatsu428aff82016-08-26 01:24:42 +0900740 if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
Namhyung Kime5a18452012-08-06 13:41:20 +0900741 u8 build_id[BUILD_ID_SIZE];
742
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300743 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
744 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
Namhyung Kime5a18452012-08-06 13:41:20 +0900745 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300746 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900747
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300748 if (!dso__build_id_equal(dso, build_id)) {
Naveen N. Rao468f3d22015-04-25 01:14:46 +0530749 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300750 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
Namhyung Kime5a18452012-08-06 13:41:20 +0900751 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300752 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900753 }
754
Adrian Hunterc6d8f2a2014-07-14 13:02:41 +0300755 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
756
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700757 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
758 NULL);
759 if (ss->symshdr.sh_type != SHT_SYMTAB)
760 ss->symtab = NULL;
761
762 ss->dynsym_idx = 0;
763 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
764 &ss->dynsym_idx);
765 if (ss->dynshdr.sh_type != SHT_DYNSYM)
766 ss->dynsym = NULL;
767
768 ss->opdidx = 0;
769 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
770 &ss->opdidx);
771 if (ss->opdshdr.sh_type != SHT_PROGBITS)
772 ss->opdsec = NULL;
773
Wang Nan99e87f72016-04-07 10:24:31 +0000774 if (dso->kernel == DSO_TYPE_USER)
775 ss->adjust_symbols = true;
776 else
Naveen N. Raod2332092015-04-28 17:35:35 +0530777 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700778
779 ss->name = strdup(name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300780 if (!ss->name) {
781 dso->load_errno = errno;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700782 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300783 }
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700784
785 ss->elf = elf;
786 ss->fd = fd;
787 ss->ehdr = ehdr;
788 ss->type = type;
789
790 return 0;
791
792out_elf_end:
793 elf_end(elf);
794out_close:
795 close(fd);
Leo Yane5f177a2019-05-30 17:38:01 +0800796 return -1;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700797}
798
Adrian Hunter39b12f782013-08-07 14:38:47 +0300799/**
800 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
801 * @kmap: kernel maps and relocation reference symbol
802 *
803 * This function returns %true if we are dealing with the kernel maps and the
804 * relocation reference symbol has not yet been found. Otherwise %false is
805 * returned.
806 */
807static bool ref_reloc_sym_not_found(struct kmap *kmap)
808{
809 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
810 !kmap->ref_reloc_sym->unrelocated_addr;
811}
812
813/**
814 * ref_reloc - kernel relocation offset.
815 * @kmap: kernel maps and relocation reference symbol
816 *
817 * This function returns the offset of kernel addresses as determined by using
818 * the relocation reference symbol i.e. if the kernel has not been relocated
819 * then the return value is zero.
820 */
821static u64 ref_reloc(struct kmap *kmap)
822{
823 if (kmap && kmap->ref_reloc_sym &&
824 kmap->ref_reloc_sym->unrelocated_addr)
825 return kmap->ref_reloc_sym->addr -
826 kmap->ref_reloc_sym->unrelocated_addr;
827 return 0;
828}
829
Naveen N. Rao0b3c2262016-04-12 14:40:50 +0530830void __weak arch__sym_update(struct symbol *s __maybe_unused,
831 GElf_Sym *sym __maybe_unused) { }
Ananth N Mavinakayanahallic50fc0a2015-04-28 17:35:38 +0530832
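/*
 * Kernel and module symbols can live outside the main .text mapping, e.g. in
 * .init.text or .data.  Such symbols are placed on a separate map/dso pair
 * named "<short name><section name>", created on first use, while .text
 * symbols trigger a one-time remap of the kernel/module map from its
 * kallsyms/identity or /proc/modules based layout to the ELF layout.
 */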
Arnaldo Carvalho de Melo4e0d1e82018-04-27 15:15:24 -0300833static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
834 GElf_Sym *sym, GElf_Shdr *shdr,
835 struct map_groups *kmaps, struct kmap *kmap,
836 struct dso **curr_dsop, struct map **curr_mapp,
837 const char *section_name,
838 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
839{
840 struct dso *curr_dso = *curr_dsop;
841 struct map *curr_map;
842 char dso_name[PATH_MAX];
843
844 /* Adjust symbol to map to file offset */
845 if (adjust_kernel_syms)
846 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
847
848 if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
849 return 0;
850
851 if (strcmp(section_name, ".text") == 0) {
852 /*
853 * The initial kernel mapping is based on
854 * kallsyms and identity maps. Overwrite it to
855 * map to the kernel dso.
856 */
857 if (*remap_kernel && dso->kernel) {
858 *remap_kernel = false;
859 map->start = shdr->sh_addr + ref_reloc(kmap);
860 map->end = map->start + shdr->sh_size;
861 map->pgoff = shdr->sh_offset;
862 map->map_ip = map__map_ip;
863 map->unmap_ip = map__unmap_ip;
864 /* Ensure maps are correctly ordered */
865 if (kmaps) {
866 map__get(map);
867 map_groups__remove(kmaps, map);
868 map_groups__insert(kmaps, map);
869 map__put(map);
870 }
871 }
872
873 /*
874 * The initial module mapping is based on
875 * /proc/modules mapped to offset zero.
876 * Overwrite it to map to the module dso.
877 */
878 if (*remap_kernel && kmodule) {
879 *remap_kernel = false;
880 map->pgoff = shdr->sh_offset;
881 }
882
883 *curr_mapp = map;
884 *curr_dsop = dso;
885 return 0;
886 }
887
888 if (!kmap)
889 return 0;
890
891 snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
892
893 curr_map = map_groups__find_by_name(kmaps, dso_name);
894 if (curr_map == NULL) {
895 u64 start = sym->st_value;
896
897 if (kmodule)
898 start += map->start + shdr->sh_offset;
899
900 curr_dso = dso__new(dso_name);
901 if (curr_dso == NULL)
902 return -1;
903 curr_dso->kernel = dso->kernel;
904 curr_dso->long_name = dso->long_name;
905 curr_dso->long_name_len = dso->long_name_len;
906 curr_map = map__new2(start, curr_dso);
907 dso__put(curr_dso);
908 if (curr_map == NULL)
909 return -1;
910
911 if (adjust_kernel_syms) {
912 curr_map->start = shdr->sh_addr + ref_reloc(kmap);
913 curr_map->end = curr_map->start + shdr->sh_size;
914 curr_map->pgoff = shdr->sh_offset;
915 } else {
916 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
917 }
918 curr_dso->symtab_type = dso->symtab_type;
919 map_groups__insert(kmaps, curr_map);
920 /*
		 * Add it before we drop the reference to curr_map, i.e. while
922 * we still are sure to have a reference to this DSO via
923 * *curr_map->dso.
924 */
925 dsos__add(&map->groups->machine->dsos, curr_dso);
926 /* kmaps already got it */
927 map__put(curr_map);
928 dso__set_loaded(curr_dso);
929 *curr_mapp = curr_map;
930 *curr_dsop = curr_dso;
931 } else
932 *curr_dsop = curr_map->dso;
933
934 return 0;
935}
936
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -0300937int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
938 struct symsrc *runtime_ss, int kmodule)
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700939{
940 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
Wang Nanba927322015-04-07 08:22:45 +0000941 struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700942 struct map *curr_map = map;
943 struct dso *curr_dso = dso;
944 Elf_Data *symstrs, *secstrs;
945 uint32_t nr_syms;
946 int err = -1;
947 uint32_t idx;
948 GElf_Ehdr ehdr;
Cody P Schafer261360b2012-08-10 15:23:01 -0700949 GElf_Shdr shdr;
Wang Nan73cdf0c2016-02-26 09:31:49 +0000950 GElf_Shdr tshdr;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700951 Elf_Data *syms, *opddata = NULL;
952 GElf_Sym sym;
Cody P Schafer261360b2012-08-10 15:23:01 -0700953 Elf_Scn *sec, *sec_strndx;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700954 Elf *elf;
955 int nr = 0;
Adrian Hunter39b12f782013-08-07 14:38:47 +0300956 bool remap_kernel = false, adjust_kernel_syms = false;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700957
Wang Nanba927322015-04-07 08:22:45 +0000958 if (kmap && !kmaps)
959 return -1;
960
Cody P Schafer261360b2012-08-10 15:23:01 -0700961 dso->symtab_type = syms_ss->type;
Adrian Hunterc6d8f2a2014-07-14 13:02:41 +0300962 dso->is_64_bit = syms_ss->is_64_bit;
Adrian Hunter0131c4e2013-08-07 14:38:50 +0300963 dso->rel = syms_ss->ehdr.e_type == ET_REL;
964
965 /*
966 * Modules may already have symbols from kallsyms, but those symbols
967 * have the wrong values for the dso maps, so remove them.
968 */
969 if (kmodule && syms_ss->symtab)
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -0300970 symbols__delete(&dso->symbols);
Cody P Schafer005f9292012-08-10 15:22:58 -0700971
Cody P Schafer261360b2012-08-10 15:23:01 -0700972 if (!syms_ss->symtab) {
Anton Blanchardd0b0d042014-09-09 08:59:29 +1000973 /*
974 * If the vmlinux is stripped, fail so we will fall back
975 * to using kallsyms. The vmlinux runtime symbols aren't
976 * of much use.
977 */
978 if (dso->kernel)
979 goto out_elf_end;
980
Cody P Schafer261360b2012-08-10 15:23:01 -0700981 syms_ss->symtab = syms_ss->dynsym;
982 syms_ss->symshdr = syms_ss->dynshdr;
Cody P Schaferd26cd122012-08-10 15:23:00 -0700983 }
984
Cody P Schafer261360b2012-08-10 15:23:01 -0700985 elf = syms_ss->elf;
986 ehdr = syms_ss->ehdr;
987 sec = syms_ss->symtab;
988 shdr = syms_ss->symshdr;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700989
Anton Blanchard50de1a02016-08-13 11:55:33 +1000990 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
991 ".text", NULL))
Wang Nan73cdf0c2016-02-26 09:31:49 +0000992 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
993
Cody P Schafer261360b2012-08-10 15:23:01 -0700994 if (runtime_ss->opdsec)
995 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
Namhyung Kime5a18452012-08-06 13:41:20 +0900996
997 syms = elf_getdata(sec, NULL);
998 if (syms == NULL)
999 goto out_elf_end;
1000
1001 sec = elf_getscn(elf, shdr.sh_link);
1002 if (sec == NULL)
1003 goto out_elf_end;
1004
1005 symstrs = elf_getdata(sec, NULL);
1006 if (symstrs == NULL)
1007 goto out_elf_end;
1008
Adrian Hunterf247fb82014-07-31 09:00:46 +03001009 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
Namhyung Kime5a18452012-08-06 13:41:20 +09001010 if (sec_strndx == NULL)
1011 goto out_elf_end;
1012
1013 secstrs = elf_getdata(sec_strndx, NULL);
1014 if (secstrs == NULL)
1015 goto out_elf_end;
1016
1017 nr_syms = shdr.sh_size / shdr.sh_entsize;
1018
1019 memset(&sym, 0, sizeof(sym));
Adrian Hunter39b12f782013-08-07 14:38:47 +03001020
1021 /*
1022 * The kernel relocation symbol is needed in advance in order to adjust
1023 * kernel maps correctly.
1024 */
1025 if (ref_reloc_sym_not_found(kmap)) {
1026 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1027 const char *elf_name = elf_sym__name(&sym, symstrs);
1028
1029 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
1030 continue;
1031 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
Adrian Hunter91767532014-01-29 16:14:36 +02001032 map->reloc = kmap->ref_reloc_sym->addr -
1033 kmap->ref_reloc_sym->unrelocated_addr;
Adrian Hunter39b12f782013-08-07 14:38:47 +03001034 break;
1035 }
1036 }
1037
Adrian Hunterf0ee3b42015-08-14 15:50:06 +03001038 /*
1039 * Handle any relocation of vdso necessary because older kernels
1040 * attempted to prelink vdso to its virtual address.
1041 */
Wang Nan73cdf0c2016-02-26 09:31:49 +00001042 if (dso__is_vdso(dso))
1043 map->reloc = map->start - dso->text_offset;
Adrian Hunterf0ee3b42015-08-14 15:50:06 +03001044
Adrian Hunter39b12f782013-08-07 14:38:47 +03001045 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
1046 /*
Arnaldo Carvalho de Melod1fd8d92018-04-26 12:36:37 -03001047 * Initial kernel and module mappings do not map to the dso.
1048 * Flag the fixups.
Adrian Hunter39b12f782013-08-07 14:38:47 +03001049 */
Arnaldo Carvalho de Melod1fd8d92018-04-26 12:36:37 -03001050 if (dso->kernel || kmodule) {
Adrian Hunter39b12f782013-08-07 14:38:47 +03001051 remap_kernel = true;
1052 adjust_kernel_syms = dso->adjust_symbols;
1053 }
Namhyung Kime5a18452012-08-06 13:41:20 +09001054 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1055 struct symbol *f;
1056 const char *elf_name = elf_sym__name(&sym, symstrs);
1057 char *demangled = NULL;
1058 int is_label = elf_sym__is_label(&sym);
1059 const char *section_name;
Cody P Schafer261360b2012-08-10 15:23:01 -07001060 bool used_opd = false;
Namhyung Kime5a18452012-08-06 13:41:20 +09001061
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03001062 if (!is_label && !elf_sym__filter(&sym))
Namhyung Kime5a18452012-08-06 13:41:20 +09001063 continue;
1064
1065 /* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so they will confuse the profile
1067 * output: */
Victor Kamensky4886f2c2015-01-26 22:34:01 -08001068 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
1069 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
1070 && (elf_name[2] == '\0' || elf_name[2] == '.'))
Namhyung Kime5a18452012-08-06 13:41:20 +09001071 continue;
1072 }
1073
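		/*
		 * On architectures that use function descriptors, e.g. ppc64
		 * ELFv1, a function symbol's value points into .opd at a
		 * descriptor whose first 64-bit word is the real entry
		 * address; dereference it and resolve the section that
		 * address falls in.
		 */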
Cody P Schafer261360b2012-08-10 15:23:01 -07001074 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
1075 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
Namhyung Kime5a18452012-08-06 13:41:20 +09001076 u64 *opd = opddata->d_buf + offset;
1077 sym.st_value = DSO__SWAP(dso, u64, *opd);
Cody P Schafer261360b2012-08-10 15:23:01 -07001078 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
1079 sym.st_value);
1080 used_opd = true;
Namhyung Kime5a18452012-08-06 13:41:20 +09001081 }
Namhyung Kim3843b052012-11-21 13:49:44 +01001082 /*
1083 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn().  That marks the loading as a failure, so
1086 * already loaded symbols cannot be fixed up.
1087 *
1088 * I'm not sure what should be done. Just ignore them for now.
1089 * - Namhyung Kim
1090 */
1091 if (sym.st_shndx == SHN_ABS)
1092 continue;
Namhyung Kime5a18452012-08-06 13:41:20 +09001093
Cody P Schafer261360b2012-08-10 15:23:01 -07001094 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
Namhyung Kime5a18452012-08-06 13:41:20 +09001095 if (!sec)
1096 goto out_elf_end;
1097
1098 gelf_getshdr(sec, &shdr);
1099
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03001100 if (is_label && !elf_sec__filter(&shdr, secstrs))
Namhyung Kime5a18452012-08-06 13:41:20 +09001101 continue;
1102
1103 section_name = elf_sec__name(&shdr, secstrs);
1104
1105 /* On ARM, symbols for thumb functions have 1 added to
1106 * the symbol address as a flag - remove it */
1107 if ((ehdr.e_machine == EM_ARM) &&
Arnaldo Carvalho de Melo18231d72018-04-26 12:45:17 -03001108 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
Namhyung Kime5a18452012-08-06 13:41:20 +09001109 (sym.st_value & 1))
1110 --sym.st_value;
1111
Adrian Hunter39b12f782013-08-07 14:38:47 +03001112 if (dso->kernel || kmodule) {
Arnaldo Carvalho de Melo4e0d1e82018-04-27 15:15:24 -03001113 if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
1114 section_name, adjust_kernel_syms, kmodule, &remap_kernel))
1115 goto out_elf_end;
Arnaldo Carvalho de Melo857140e2018-04-27 10:53:14 -03001116 } else if ((used_opd && runtime_ss->adjust_symbols) ||
1117 (!used_opd && syms_ss->adjust_symbols)) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001118 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1119 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1120 (u64)sym.st_value, (u64)shdr.sh_addr,
1121 (u64)shdr.sh_offset);
1122 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1123 }
Arnaldo Carvalho de Melo4e0d1e82018-04-27 15:15:24 -03001124
Milian Wolff2a8d41b2016-08-30 13:41:02 +02001125 demangled = demangle_sym(dso, kmodule, elf_name);
1126 if (demangled != NULL)
1127 elf_name = demangled;
Namhyung Kime71e7942014-07-31 14:47:42 +09001128
Namhyung Kime5a18452012-08-06 13:41:20 +09001129 f = symbol__new(sym.st_value, sym.st_size,
Arnaldo Carvalho de Meloaf30bff2018-04-26 11:09:10 -03001130 GELF_ST_BIND(sym.st_info),
1131 GELF_ST_TYPE(sym.st_info), elf_name);
Namhyung Kime5a18452012-08-06 13:41:20 +09001132 free(demangled);
1133 if (!f)
1134 goto out_elf_end;
1135
Naveen N. Rao0b3c2262016-04-12 14:40:50 +05301136 arch__sym_update(f, &sym);
1137
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03001138 __symbols__insert(&curr_dso->symbols, f, dso->kernel);
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -03001139 nr++;
Namhyung Kime5a18452012-08-06 13:41:20 +09001140 }
1141
1142 /*
1143 * For misannotated, zeroed, ASM function sizes.
1144 */
1145 if (nr > 0) {
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03001146 symbols__fixup_end(&dso->symbols);
1147 symbols__fixup_duplicate(&dso->symbols);
Namhyung Kime5a18452012-08-06 13:41:20 +09001148 if (kmap) {
1149 /*
1150 * We need to fixup this here too because we create new
1151 * maps here, for things like vsyscall sections.
1152 */
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03001153 map_groups__fixup_end(kmaps);
Namhyung Kime5a18452012-08-06 13:41:20 +09001154 }
1155 }
1156 err = nr;
1157out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +09001158 return err;
1159}
1160
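/*
 * Walk the PT_LOAD program headers and invoke @mapfn for each loadable
 * segment: executable segments only when @exe is set, otherwise any readable
 * segment.
 */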
Adrian Hunter8e0cf962013-08-07 14:38:51 +03001161static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1162{
1163 GElf_Phdr phdr;
1164 size_t i, phdrnum;
1165 int err;
1166 u64 sz;
1167
1168 if (elf_getphdrnum(elf, &phdrnum))
1169 return -1;
1170
1171 for (i = 0; i < phdrnum; i++) {
1172 if (gelf_getphdr(elf, i, &phdr) == NULL)
1173 return -1;
1174 if (phdr.p_type != PT_LOAD)
1175 continue;
1176 if (exe) {
1177 if (!(phdr.p_flags & PF_X))
1178 continue;
1179 } else {
1180 if (!(phdr.p_flags & PF_R))
1181 continue;
1182 }
1183 sz = min(phdr.p_memsz, phdr.p_filesz);
1184 if (!sz)
1185 continue;
1186 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1187 if (err)
1188 return err;
1189 }
1190 return 0;
1191}
1192
1193int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1194 bool *is_64_bit)
1195{
1196 int err;
1197 Elf *elf;
1198
1199 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1200 if (elf == NULL)
1201 return -1;
1202
1203 if (is_64_bit)
1204 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1205
1206 err = elf_read_maps(elf, exe, mapfn, data);
1207
1208 elf_end(elf);
1209 return err;
1210}
1211
Adrian Hunter2b5b8bb2014-07-22 16:17:59 +03001212enum dso_type dso__type_fd(int fd)
1213{
1214 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1215 GElf_Ehdr ehdr;
1216 Elf_Kind ek;
1217 Elf *elf;
1218
1219 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1220 if (elf == NULL)
1221 goto out;
1222
1223 ek = elf_kind(elf);
1224 if (ek != ELF_K_ELF)
1225 goto out_end;
1226
1227 if (gelf_getclass(elf) == ELFCLASS64) {
1228 dso_type = DSO__TYPE_64BIT;
1229 goto out_end;
1230 }
1231
1232 if (gelf_getehdr(elf, &ehdr) == NULL)
1233 goto out_end;
1234
1235 if (ehdr.e_machine == EM_X86_64)
1236 dso_type = DSO__TYPE_X32BIT;
1237 else
1238 dso_type = DSO__TYPE_32BIT;
1239out_end:
1240 elf_end(elf);
1241out:
1242 return dso_type;
1243}
1244
Adrian Hunterafba19d2013-10-09 15:01:12 +03001245static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1246{
1247 ssize_t r;
1248 size_t n;
1249 int err = -1;
1250 char *buf = malloc(page_size);
1251
1252 if (buf == NULL)
1253 return -1;
1254
1255 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1256 goto out;
1257
1258 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1259 goto out;
1260
1261 while (len) {
1262 n = page_size;
1263 if (len < n)
1264 n = len;
1265 /* Use read because mmap won't work on proc files */
1266 r = read(from, buf, n);
1267 if (r < 0)
1268 goto out;
1269 if (!r)
1270 break;
1271 n = r;
1272 r = write(to, buf, n);
1273 if (r < 0)
1274 goto out;
1275 if ((size_t)r != n)
1276 goto out;
1277 len -= n;
1278 }
1279
1280 err = 0;
1281out:
1282 free(buf);
1283 return err;
1284}
1285
1286struct kcore {
1287 int fd;
1288 int elfclass;
1289 Elf *elf;
1290 GElf_Ehdr ehdr;
1291};
1292
1293static int kcore__open(struct kcore *kcore, const char *filename)
1294{
1295 GElf_Ehdr *ehdr;
1296
1297 kcore->fd = open(filename, O_RDONLY);
1298 if (kcore->fd == -1)
1299 return -1;
1300
1301 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1302 if (!kcore->elf)
1303 goto out_close;
1304
1305 kcore->elfclass = gelf_getclass(kcore->elf);
1306 if (kcore->elfclass == ELFCLASSNONE)
1307 goto out_end;
1308
1309 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1310 if (!ehdr)
1311 goto out_end;
1312
1313 return 0;
1314
1315out_end:
1316 elf_end(kcore->elf);
1317out_close:
1318 close(kcore->fd);
1319 return -1;
1320}
1321
1322static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1323 bool temp)
1324{
Adrian Hunterafba19d2013-10-09 15:01:12 +03001325 kcore->elfclass = elfclass;
1326
1327 if (temp)
1328 kcore->fd = mkstemp(filename);
1329 else
1330 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1331 if (kcore->fd == -1)
1332 return -1;
1333
1334 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1335 if (!kcore->elf)
1336 goto out_close;
1337
1338 if (!gelf_newehdr(kcore->elf, elfclass))
1339 goto out_end;
1340
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001341 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
Adrian Hunterafba19d2013-10-09 15:01:12 +03001342
1343 return 0;
1344
1345out_end:
1346 elf_end(kcore->elf);
1347out_close:
1348 close(kcore->fd);
1349 unlink(filename);
1350 return -1;
1351}
1352
1353static void kcore__close(struct kcore *kcore)
1354{
1355 elf_end(kcore->elf);
1356 close(kcore->fd);
1357}
1358
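/*
 * Copy the ELF header from the source kcore but drop all section header
 * information: the extracted copy carries only @count PT_LOAD program
 * headers describing the copied code segments.
 */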
1359static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1360{
1361 GElf_Ehdr *ehdr = &to->ehdr;
1362 GElf_Ehdr *kehdr = &from->ehdr;
1363
1364 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1365 ehdr->e_type = kehdr->e_type;
1366 ehdr->e_machine = kehdr->e_machine;
1367 ehdr->e_version = kehdr->e_version;
1368 ehdr->e_entry = 0;
1369 ehdr->e_shoff = 0;
1370 ehdr->e_flags = kehdr->e_flags;
1371 ehdr->e_phnum = count;
1372 ehdr->e_shentsize = 0;
1373 ehdr->e_shnum = 0;
1374 ehdr->e_shstrndx = 0;
1375
1376 if (from->elfclass == ELFCLASS32) {
1377 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1378 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1379 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1380 } else {
1381 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1382 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1383 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1384 }
1385
1386 if (!gelf_update_ehdr(to->elf, ehdr))
1387 return -1;
1388
1389 if (!gelf_newphdr(to->elf, count))
1390 return -1;
1391
1392 return 0;
1393}
1394
1395static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1396 u64 addr, u64 len)
1397{
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001398 GElf_Phdr phdr = {
1399 .p_type = PT_LOAD,
1400 .p_flags = PF_R | PF_W | PF_X,
1401 .p_offset = offset,
1402 .p_vaddr = addr,
1403 .p_paddr = 0,
1404 .p_filesz = len,
1405 .p_memsz = len,
1406 .p_align = page_size,
1407 };
Adrian Hunterafba19d2013-10-09 15:01:12 +03001408
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001409 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
Adrian Hunterafba19d2013-10-09 15:01:12 +03001410 return -1;
1411
1412 return 0;
1413}
1414
1415static off_t kcore__write(struct kcore *kcore)
1416{
1417 return elf_update(kcore->elf, ELF_C_WRITE);
1418}
1419
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001420struct phdr_data {
1421 off_t offset;
Adrian Hunter15acef62018-05-22 13:54:41 +03001422 off_t rel;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001423 u64 addr;
1424 u64 len;
Adrian Hunterf6838202018-05-22 13:54:38 +03001425 struct list_head node;
Adrian Hunter22916fd2018-05-22 13:54:45 +03001426 struct phdr_data *remaps;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001427};
1428
Adrian Huntera1a3a062018-05-22 13:54:44 +03001429struct sym_data {
1430 u64 addr;
1431 struct list_head node;
1432};
1433
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001434struct kcore_copy_info {
1435 u64 stext;
1436 u64 etext;
1437 u64 first_symbol;
1438 u64 last_symbol;
1439 u64 first_module;
1440 u64 last_module_symbol;
Adrian Hunter6e979572018-05-22 13:54:39 +03001441 size_t phnum;
Adrian Hunterf6838202018-05-22 13:54:38 +03001442 struct list_head phdrs;
Adrian Huntera1a3a062018-05-22 13:54:44 +03001443 struct list_head syms;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001444};
1445
Adrian Hunter15acef62018-05-22 13:54:41 +03001446#define kcore_copy__for_each_phdr(k, p) \
1447 list_for_each_entry((p), &(k)->phdrs, node)
1448
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001449static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
1450{
1451 struct phdr_data *p = zalloc(sizeof(*p));
1452
1453 if (p) {
1454 p->addr = addr;
1455 p->len = len;
1456 p->offset = offset;
1457 }
1458
1459 return p;
1460}
1461
1462static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
1463 u64 addr, u64 len,
1464 off_t offset)
1465{
1466 struct phdr_data *p = phdr_data__new(addr, len, offset);
1467
1468 if (p)
1469 list_add_tail(&p->node, &kci->phdrs);
1470
1471 return p;
1472}
1473
1474static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
1475{
1476 struct phdr_data *p, *tmp;
1477
1478 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
1479 list_del(&p->node);
1480 free(p);
1481 }
1482}
1483
Adrian Huntera1a3a062018-05-22 13:54:44 +03001484static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
1485 u64 addr)
1486{
1487 struct sym_data *s = zalloc(sizeof(*s));
1488
1489 if (s) {
1490 s->addr = addr;
1491 list_add_tail(&s->node, &kci->syms);
1492 }
1493
1494 return s;
1495}
1496
1497static void kcore_copy__free_syms(struct kcore_copy_info *kci)
1498{
1499 struct sym_data *s, *tmp;
1500
1501 list_for_each_entry_safe(s, tmp, &kci->syms, node) {
1502 list_del(&s->node);
1503 free(s);
1504 }
1505}
1506
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001507static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1508 u64 start)
1509{
1510 struct kcore_copy_info *kci = arg;
1511
Arnaldo Carvalho de Meloe85e0e02018-04-25 17:16:31 -03001512 if (!kallsyms__is_function(type))
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001513 return 0;
1514
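	/*
	 * kallsyms lists module symbols with a "[modname]" suffix; use those
	 * only to track the highest module symbol address.
	 */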
1515 if (strchr(name, '[')) {
1516 if (start > kci->last_module_symbol)
1517 kci->last_module_symbol = start;
1518 return 0;
1519 }
1520
1521 if (!kci->first_symbol || start < kci->first_symbol)
1522 kci->first_symbol = start;
1523
1524 if (!kci->last_symbol || start > kci->last_symbol)
1525 kci->last_symbol = start;
1526
1527 if (!strcmp(name, "_stext")) {
1528 kci->stext = start;
1529 return 0;
1530 }
1531
1532 if (!strcmp(name, "_etext")) {
1533 kci->etext = start;
1534 return 0;
1535 }
1536
Adrian Huntera1a3a062018-05-22 13:54:44 +03001537 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
1538 return -1;
1539
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001540 return 0;
1541}
1542
1543static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1544 const char *dir)
1545{
1546 char kallsyms_filename[PATH_MAX];
1547
1548 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1549
1550 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1551 return -1;
1552
1553 if (kallsyms__parse(kallsyms_filename, kci,
1554 kcore_copy__process_kallsyms) < 0)
1555 return -1;
1556
1557 return 0;
1558}
1559
1560static int kcore_copy__process_modules(void *arg,
1561 const char *name __maybe_unused,
Thomas Richter9ad46522017-08-03 15:49:02 +02001562 u64 start, u64 size __maybe_unused)
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001563{
1564 struct kcore_copy_info *kci = arg;
1565
1566 if (!kci->first_module || start < kci->first_module)
1567 kci->first_module = start;
1568
1569 return 0;
1570}
1571
1572static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1573 const char *dir)
1574{
1575 char modules_filename[PATH_MAX];
1576
1577 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1578
1579 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1580 return -1;
1581
1582 if (modules__parse(modules_filename, kci,
1583 kcore_copy__process_modules) < 0)
1584 return -1;
1585
1586 return 0;
1587}
1588
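/*
 * If @s falls inside the kcore segment [@start, @end), record a phdr_data
 * entry covering [@s, min(@e, @end)), translating @s into a kcore file
 * offset via @pgoff.
 */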
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001589static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
1590 u64 pgoff, u64 s, u64 e)
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001591{
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001592 u64 len, offset;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001593
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001594 if (s < start || s >= end)
1595 return 0;
1596
1597 offset = (s - start) + pgoff;
1598 len = e < end ? e - s : end - s;
1599
1600 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001601}
1602
1603static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1604{
1605 struct kcore_copy_info *kci = data;
1606 u64 end = start + len;
Adrian Huntera1a3a062018-05-22 13:54:44 +03001607 struct sym_data *sdat;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001608
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001609 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
1610 return -1;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001611
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001612 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
1613 kci->last_module_symbol))
1614 return -1;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001615
Adrian Huntera1a3a062018-05-22 13:54:44 +03001616 list_for_each_entry(sdat, &kci->syms, node) {
1617 u64 s = round_down(sdat->addr, page_size);
1618
1619 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
1620 return -1;
1621 }
1622
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001623 return 0;
1624}
1625
1626static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1627{
1628 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1629 return -1;
1630
1631 return 0;
1632}
1633
Adrian Hunter22916fd2018-05-22 13:54:45 +03001634static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
1635{
1636 struct phdr_data *p, *k = NULL;
1637 u64 kend;
1638
1639 if (!kci->stext)
1640 return;
1641
1642 /* Find phdr that corresponds to the kernel map (contains stext) */
1643 kcore_copy__for_each_phdr(kci, p) {
1644 u64 pend = p->addr + p->len - 1;
1645
1646 if (p->addr <= kci->stext && pend >= kci->stext) {
1647 k = p;
1648 break;
1649 }
1650 }
1651
1652 if (!k)
1653 return;
1654
1655 kend = k->offset + k->len;
1656
1657 /* Find phdrs that remap the kernel */
1658 kcore_copy__for_each_phdr(kci, p) {
1659 u64 pend = p->offset + p->len;
1660
1661 if (p == k)
1662 continue;
1663
1664 if (p->offset >= k->offset && pend <= kend)
1665 p->remaps = k;
1666 }
1667}
1668
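/*
 * Assign each copied segment its offset (->rel) in the output file: segments
 * are laid out back to back, except that segments which merely remap part of
 * the kernel text reuse the corresponding offset inside the already placed
 * kernel segment.
 */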
Adrian Hunter15acef62018-05-22 13:54:41 +03001669static void kcore_copy__layout(struct kcore_copy_info *kci)
1670{
1671 struct phdr_data *p;
1672 off_t rel = 0;
1673
Adrian Hunter22916fd2018-05-22 13:54:45 +03001674 kcore_copy__find_remaps(kci);
1675
Adrian Hunter15acef62018-05-22 13:54:41 +03001676 kcore_copy__for_each_phdr(kci, p) {
Adrian Hunter22916fd2018-05-22 13:54:45 +03001677 if (!p->remaps) {
1678 p->rel = rel;
1679 rel += p->len;
1680 }
Adrian Hunter15acef62018-05-22 13:54:41 +03001681 kci->phnum += 1;
1682 }
Adrian Hunter22916fd2018-05-22 13:54:45 +03001683
1684 kcore_copy__for_each_phdr(kci, p) {
1685 struct phdr_data *k = p->remaps;
1686
1687 if (k)
1688 p->rel = p->offset - k->offset + k->rel;
1689 }
Adrian Hunter15acef62018-05-22 13:54:41 +03001690}
1691
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001692static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1693 Elf *elf)
1694{
1695 if (kcore_copy__parse_kallsyms(kci, dir))
1696 return -1;
1697
1698 if (kcore_copy__parse_modules(kci, dir))
1699 return -1;
1700
1701 if (kci->stext)
1702 kci->stext = round_down(kci->stext, page_size);
1703 else
1704 kci->stext = round_down(kci->first_symbol, page_size);
1705
1706 if (kci->etext) {
1707 kci->etext = round_up(kci->etext, page_size);
1708 } else if (kci->last_symbol) {
1709 kci->etext = round_up(kci->last_symbol, page_size);
1710 kci->etext += page_size;
1711 }
1712
1713 kci->first_module = round_down(kci->first_module, page_size);
1714
1715 if (kci->last_module_symbol) {
1716 kci->last_module_symbol = round_up(kci->last_module_symbol,
1717 page_size);
1718 kci->last_module_symbol += page_size;
1719 }
1720
1721 if (!kci->stext || !kci->etext)
1722 return -1;
1723
1724 if (kci->first_module && !kci->last_module_symbol)
1725 return -1;
1726
Adrian Hunter15acef62018-05-22 13:54:41 +03001727 if (kcore_copy__read_maps(kci, elf))
1728 return -1;
1729
1730 kcore_copy__layout(kci);
1731
1732 return 0;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001733}
1734
1735static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1736 const char *name)
1737{
1738 char from_filename[PATH_MAX];
1739 char to_filename[PATH_MAX];
1740
1741 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1742 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1743
1744 return copyfile_mode(from_filename, to_filename, 0400);
1745}
1746
1747static int kcore_copy__unlink(const char *dir, const char *name)
1748{
1749 char filename[PATH_MAX];
1750
1751 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1752
1753 return unlink(filename);
1754}
1755
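/*
 * Compare two open files a page at a time; returns 0 only if their contents
 * are identical. Used to verify that kallsyms and modules did not change
 * while kcore was being copied.
 */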
1756static int kcore_copy__compare_fds(int from, int to)
1757{
1758 char *buf_from;
1759 char *buf_to;
1760 ssize_t ret;
1761 size_t len;
1762 int err = -1;
1763
1764 buf_from = malloc(page_size);
1765 buf_to = malloc(page_size);
1766 if (!buf_from || !buf_to)
1767 goto out;
1768
1769 while (1) {
1770 /* Use read because mmap won't work on proc files */
1771 ret = read(from, buf_from, page_size);
1772 if (ret < 0)
1773 goto out;
1774
1775 if (!ret)
1776 break;
1777
1778 len = ret;
1779
1780 if (readn(to, buf_to, len) != (int)len)
1781 goto out;
1782
1783 if (memcmp(buf_from, buf_to, len))
1784 goto out;
1785 }
1786
1787 err = 0;
1788out:
1789 free(buf_to);
1790 free(buf_from);
1791 return err;
1792}
1793
1794static int kcore_copy__compare_files(const char *from_filename,
1795 const char *to_filename)
1796{
1797 int from, to, err = -1;
1798
1799 from = open(from_filename, O_RDONLY);
1800 if (from < 0)
1801 return -1;
1802
1803 to = open(to_filename, O_RDONLY);
1804 if (to < 0)
1805 goto out_close_from;
1806
1807 err = kcore_copy__compare_fds(from, to);
1808
1809 close(to);
1810out_close_from:
1811 close(from);
1812 return err;
1813}
1814
1815static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1816 const char *name)
1817{
1818 char from_filename[PATH_MAX];
1819 char to_filename[PATH_MAX];
1820
1821 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1822 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1823
1824 return kcore_copy__compare_files(from_filename, to_filename);
1825}
1826
1827/**
1828 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1829 * @from_dir: from directory
1830 * @to_dir: to directory
1831 *
1832 * This function copies kallsyms, modules and kcore files from one directory to
1833 * another. kallsyms and modules are copied entirely. Only code segments are
1834 * copied from kcore. It is assumed that two segments suffice: one for the
1835 * kernel proper and one for all the modules. The code segments are determined
1836 * from kallsyms and modules files. The kernel map starts at _stext or the
1837 * lowest function symbol, and ends at _etext or the highest function symbol.
1838 * The module map starts at the lowest module address and ends at the highest
1839 * module symbol. Start addresses are rounded down to the nearest page. End
1840 * addresses are rounded up to the nearest page. An extra page is added after the
1841 * highest kernel symbol and the highest module symbol so that, hopefully, those
1842 * symbols are encompassed too. Because it contains only code sections, the resulting kcore is
1843 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1844 * is not the same for the kernel map and the modules map. That happens because
1845 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1846 * kallsyms and modules files are compared with their copies to check that
1847 * modules have not been loaded or unloaded while the copies were taking place.
1848 *
1849 * Return: %0 on success, %-1 on failure.
1850 */
1851int kcore_copy(const char *from_dir, const char *to_dir)
1852{
1853 struct kcore kcore;
1854 struct kcore extract;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001855 int idx = 0, err = -1;
Adrian Hunterd2c95982018-05-22 13:54:42 +03001856 off_t offset, sz;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001857 struct kcore_copy_info kci = { .stext = 0, };
1858 char kcore_filename[PATH_MAX];
1859 char extract_filename[PATH_MAX];
Adrian Hunterd2c95982018-05-22 13:54:42 +03001860 struct phdr_data *p;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001861
Adrian Hunterf6838202018-05-22 13:54:38 +03001862 INIT_LIST_HEAD(&kci.phdrs);
Adrian Huntera1a3a062018-05-22 13:54:44 +03001863 INIT_LIST_HEAD(&kci.syms);
Adrian Hunterf6838202018-05-22 13:54:38 +03001864
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001865 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1866 return -1;
1867
1868 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1869 goto out_unlink_kallsyms;
1870
1871 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1872 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1873
1874 if (kcore__open(&kcore, kcore_filename))
1875 goto out_unlink_modules;
1876
1877 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1878 goto out_kcore_close;
1879
1880 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1881 goto out_kcore_close;
1882
Adrian Hunter6e979572018-05-22 13:54:39 +03001883 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001884 goto out_extract_close;
1885
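	/*
	 * The copied data starts after the ELF header and program headers,
	 * rounded up to a page boundary: e.g. for a 64-bit kcore with two
	 * phdrs that is 64 + 2 * 56 = 176 bytes, rounded up to 4096 with
	 * 4K pages.
	 */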
Adrian Hunterc9dd1d82018-05-22 13:54:40 +03001886 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
1887 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
1888 offset = round_up(offset, page_size);
1889
Adrian Hunterd2c95982018-05-22 13:54:42 +03001890 kcore_copy__for_each_phdr(&kci, p) {
1891 off_t offs = p->rel + offset;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001892
Adrian Hunterd2c95982018-05-22 13:54:42 +03001893 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001894 goto out_extract_close;
1895 }
1896
1897 sz = kcore__write(&extract);
1898 if (sz < 0 || sz > offset)
1899 goto out_extract_close;
1900
Adrian Hunterd2c95982018-05-22 13:54:42 +03001901 kcore_copy__for_each_phdr(&kci, p) {
1902 off_t offs = p->rel + offset;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001903
Adrian Hunter22916fd2018-05-22 13:54:45 +03001904 if (p->remaps)
1905 continue;
Adrian Hunterd2c95982018-05-22 13:54:42 +03001906 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
1907 goto out_extract_close;
1908 }
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001909
1910 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1911 goto out_extract_close;
1912
1913 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1914 goto out_extract_close;
1915
1916 err = 0;
1917
1918out_extract_close:
1919 kcore__close(&extract);
1920 if (err)
1921 unlink(extract_filename);
1922out_kcore_close:
1923 kcore__close(&kcore);
1924out_unlink_modules:
1925 if (err)
1926 kcore_copy__unlink(to_dir, "modules");
1927out_unlink_kallsyms:
1928 if (err)
1929 kcore_copy__unlink(to_dir, "kallsyms");
1930
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001931 kcore_copy__free_phdrs(&kci);
Adrian Huntera1a3a062018-05-22 13:54:44 +03001932 kcore_copy__free_syms(&kci);
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001933
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001934 return err;
1935}
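/*
 * Illustrative use of kcore_copy() (a sketch, not called from this file; the
 * destination path is only an example): kallsyms, modules and kcore are read
 * from /proc and copied into a directory of perf's own, roughly like so:
 *
 *	if (kcore_copy("/proc", "/tmp/kcore_dir"))
 *		pr_debug("kcore copy failed\n");
 */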
1936
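/*
 * kcore_extract__create() pulls a single range out of kcore: kce->len bytes
 * at file offset kce->offs, mapped at address kce->addr, are written to a
 * temporary file (named from PERF_KCORE_EXTRACT) as a minimal ELF with one
 * program header. kcore_extract__delete() below removes that file again.
 */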
Adrian Hunterafba19d2013-10-09 15:01:12 +03001937int kcore_extract__create(struct kcore_extract *kce)
1938{
1939 struct kcore kcore;
1940 struct kcore extract;
1941 size_t count = 1;
1942 int idx = 0, err = -1;
1943 off_t offset = page_size, sz;
1944
1945 if (kcore__open(&kcore, kce->kcore_filename))
1946 return -1;
1947
1948 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1949 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1950 goto out_kcore_close;
1951
1952 if (kcore__copy_hdr(&kcore, &extract, count))
1953 goto out_extract_close;
1954
1955 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1956 goto out_extract_close;
1957
1958 sz = kcore__write(&extract);
1959 if (sz < 0 || sz > offset)
1960 goto out_extract_close;
1961
1962 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1963 goto out_extract_close;
1964
1965 err = 0;
1966
1967out_extract_close:
1968 kcore__close(&extract);
1969 if (err)
1970 unlink(kce->extract_filename);
1971out_kcore_close:
1972 kcore__close(&kcore);
1973
1974 return err;
1975}
1976
1977void kcore_extract__delete(struct kcore_extract *kce)
1978{
1979 unlink(kce->extract_filename);
1980}
1981
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03001982#ifdef HAVE_GELF_GETNOTE_SUPPORT
Ravi Bangoria5a5e3d32018-08-20 10:12:50 +05301983
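/*
 * Compensate for prelinking: if the .stapsdt.base section has moved, shift
 * the recorded probe location by the difference between the section's file
 * offset and the base address stored in the note.
 */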
1984static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
1985{
1986 if (!base_off)
1987 return;
1988
1989 if (tmp->bit32)
1990 tmp->addr.a32[SDT_NOTE_IDX_LOC] =
1991 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
1992 tmp->addr.a32[SDT_NOTE_IDX_BASE];
1993 else
1994 tmp->addr.a64[SDT_NOTE_IDX_LOC] =
1995 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
1996 tmp->addr.a64[SDT_NOTE_IDX_BASE];
1997}
1998
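/*
 * Likewise rebase the semaphore (reference counter) address: convert it from
 * the virtual address recorded in the note to an offset usable within the
 * file, using the address and file offset of the section looked up via
 * SDT_PROBES_SCN.
 */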
1999static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
2000 GElf_Addr base_off)
2001{
2002 if (!base_off)
2003 return;
2004
2005 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
2006 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2007 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
2008 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2009}
2010
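/*
 * Rough layout of an SDT note's description, as assumed by the parser below
 * (addresses are 32-bit or 64-bit depending on the ELF class):
 *
 *	addr[SDT_NOTE_IDX_LOC]		probe location
 *	addr[SDT_NOTE_IDX_BASE]		.stapsdt.base address
 *	addr[SDT_NOTE_IDX_REFCTR]	semaphore (reference counter) address
 *	"provider\0name\0arguments\0"
 */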
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002011/**
2012 * populate_sdt_note : Parse raw data and identify SDT note
2013 * @elf: elf of the opened file
2014 * @data: raw data of a section with description offset applied
2015 * @len: note description size
2016 * @type: type of the note
2017 * @sdt_notes: List to add the SDT note
2018 *
2019 * Responsible for parsing the @data in section .note.stapsdt in @elf and
2020 * if it's an SDT note, appends it to the @sdt_notes list.
2021 */
2022static int populate_sdt_note(Elf **elf, const char *data, size_t len,
2023 struct list_head *sdt_notes)
2024{
Alexis Berlemontbe881842016-12-14 01:07:31 +01002025 const char *provider, *name, *args;
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002026 struct sdt_note *tmp = NULL;
2027 GElf_Ehdr ehdr;
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002028 GElf_Shdr shdr;
2029 int ret = -EINVAL;
2030
2031 union {
2032 Elf64_Addr a64[NR_ADDR];
2033 Elf32_Addr a32[NR_ADDR];
2034 } buf;
2035
2036 Elf_Data dst = {
2037 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
2038 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
2039 .d_off = 0, .d_align = 0
2040 };
2041 Elf_Data src = {
2042 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
2043 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
2044 .d_align = 0
2045 };
2046
2047 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
2048 if (!tmp) {
2049 ret = -ENOMEM;
2050 goto out_err;
2051 }
2052
2053 INIT_LIST_HEAD(&tmp->note_list);
2054
2055 if (len < dst.d_size + 3)
2056 goto out_free_note;
2057
2058 /* Translation from file representation to memory representation */
2059 if (gelf_xlatetom(*elf, &dst, &src,
2060 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
2061 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
2062 goto out_free_note;
2063 }
2064
2065 /* Populate the fields of sdt_note */
2066 provider = data + dst.d_size;
2067
2068 name = (const char *)memchr(provider, '\0', data + len - provider);
2069 if (name++ == NULL)
2070 goto out_free_note;
2071
2072 tmp->provider = strdup(provider);
2073 if (!tmp->provider) {
2074 ret = -ENOMEM;
2075 goto out_free_note;
2076 }
2077 tmp->name = strdup(name);
2078 if (!tmp->name) {
2079 ret = -ENOMEM;
2080 goto out_free_prov;
2081 }
2082
Alexis Berlemontbe881842016-12-14 01:07:31 +01002083 args = memchr(name, '\0', data + len - name);
2084
2085 /*
2086 * There is no argument if:
2087 * - We reached the end of the note;
2088 * - There is not enough room to hold a potential string;
2089 * - The argument string is empty or just contains ':'.
2090 */
2091 if (args == NULL || data + len - args < 2 ||
2092 args[1] == ':' || args[1] == '\0')
2093 tmp->args = NULL;
2094 else {
2095 tmp->args = strdup(++args);
2096 if (!tmp->args) {
2097 ret = -ENOMEM;
2098 goto out_free_name;
2099 }
2100 }
2101
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002102 if (gelf_getclass(*elf) == ELFCLASS32) {
2103 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
2104 tmp->bit32 = true;
2105 } else {
2106 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
2107 tmp->bit32 = false;
2108 }
2109
2110 if (!gelf_getehdr(*elf, &ehdr)) {
2111 pr_debug("%s : cannot get elf header.\n", __func__);
2112 ret = -EBADF;
Alexis Berlemontbe881842016-12-14 01:07:31 +01002113 goto out_free_args;
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002114 }
2115
2116	/* Adjust for the prelink effect:
2117	 * Find the .stapsdt.base section, which is used to detect
2118	 * prelinking (if present). Compare the file offset of that
2119	 * section with the base address recorded in the SDT note
2120	 * description; if they differ, adjust the note location
2121	 * accordingly.
2122 */
Ravi Bangoria5a5e3d32018-08-20 10:12:50 +05302123 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
2124 sdt_adjust_loc(tmp, shdr.sh_offset);
2125
2126 /* Adjust reference counter offset */
2127 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
2128 sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002129
2130 list_add_tail(&tmp->note_list, sdt_notes);
2131 return 0;
2132
Alexis Berlemontbe881842016-12-14 01:07:31 +01002133out_free_args:
2134 free(tmp->args);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002135out_free_name:
2136 free(tmp->name);
2137out_free_prov:
2138 free(tmp->provider);
2139out_free_note:
2140 free(tmp);
2141out_err:
2142 return ret;
2143}
2144
2145/**
2146 * construct_sdt_notes_list : constructs a list of SDT notes
2147 * @elf : elf to look into
2148 * @sdt_notes : empty list_head
2149 *
2150 * Scans the sections in 'elf' for the section
2151 * .note.stapsdt. It then calls populate_sdt_note() to parse the
2152 * SDT events and populate the 'sdt_notes' list.
2153 */
2154static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
2155{
2156 GElf_Ehdr ehdr;
2157 Elf_Scn *scn = NULL;
2158 Elf_Data *data;
2159 GElf_Shdr shdr;
2160 size_t shstrndx, next;
2161 GElf_Nhdr nhdr;
2162 size_t name_off, desc_off, offset;
2163 int ret = 0;
2164
2165 if (gelf_getehdr(elf, &ehdr) == NULL) {
2166 ret = -EBADF;
2167 goto out_ret;
2168 }
2169 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
2170 ret = -EBADF;
2171 goto out_ret;
2172 }
2173
2174 /* Look for the required section */
2175 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
2176 if (!scn) {
2177 ret = -ENOENT;
2178 goto out_ret;
2179 }
2180
2181 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
2182 ret = -ENOENT;
2183 goto out_ret;
2184 }
2185
2186 data = elf_getdata(scn, NULL);
2187
2188 /* Get the SDT notes */
2189 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
2190 &desc_off)) > 0; offset = next) {
2191 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
2192 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
2193 sizeof(SDT_NOTE_NAME))) {
2194 /* Check the type of the note */
2195 if (nhdr.n_type != SDT_NOTE_TYPE)
2196 goto out_ret;
2197
2198 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
2199 nhdr.n_descsz, sdt_notes);
2200 if (ret < 0)
2201 goto out_ret;
2202 }
2203 }
2204 if (list_empty(sdt_notes))
2205 ret = -ENOENT;
2206
2207out_ret:
2208 return ret;
2209}
2210
2211/**
2212 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2213 * @head : empty list_head
2214 * @target : file to find SDT notes from
2215 *
2216 * This opens the file, initializes the ELF, and then calls
2217 * construct_sdt_notes_list().
2218 */
2219int get_sdt_note_list(struct list_head *head, const char *target)
2220{
2221 Elf *elf;
2222 int fd, ret;
2223
2224 fd = open(target, O_RDONLY);
2225 if (fd < 0)
2226 return -EBADF;
2227
2228 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2229 if (!elf) {
2230 ret = -EBADF;
2231 goto out_close;
2232 }
2233 ret = construct_sdt_notes_list(elf, head);
2234 elf_end(elf);
2235out_close:
2236 close(fd);
2237 return ret;
2238}
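/*
 * Sketch of how these helpers fit together (illustrative only; the binary
 * path is just an example):
 *
 *	LIST_HEAD(sdt_notes);
 *	struct sdt_note *note;
 *
 *	if (!get_sdt_note_list(&sdt_notes, "/usr/bin/example")) {
 *		list_for_each_entry(note, &sdt_notes, note_list)
 *			pr_debug("%s:%s\n", note->provider, note->name);
 *		cleanup_sdt_note_list(&sdt_notes);
 *	}
 */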
2239
2240/**
2241 * cleanup_sdt_note_list : free the sdt notes' list
2242 * @sdt_notes: sdt notes' list
2243 *
2244 * Free up the SDT notes in @sdt_notes.
2245 * Returns the number of SDT notes freed.
2246 */
2247int cleanup_sdt_note_list(struct list_head *sdt_notes)
2248{
2249 struct sdt_note *tmp, *pos;
2250 int nr_free = 0;
2251
2252 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2253 list_del(&pos->note_list);
2254 free(pos->name);
2255 free(pos->provider);
2256 free(pos);
2257 nr_free++;
2258 }
2259 return nr_free;
2260}
2261
2262/**
2263 * sdt_notes__get_count: Counts the number of SDT events
2264 * @start: list_head to sdt_notes list
2265 *
2266 * Returns the number of SDT notes in a list
2267 */
2268int sdt_notes__get_count(struct list_head *start)
2269{
2270 struct sdt_note *sdt_ptr;
2271 int count = 0;
2272
2273 list_for_each_entry(sdt_ptr, start, note_list)
2274 count++;
2275 return count;
2276}
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03002277#endif
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002278
Namhyung Kime5a18452012-08-06 13:41:20 +09002279void symbol__elf_init(void)
2280{
2281 elf_version(EV_CURRENT);
2282}