blob: e680371bd5c9b79ca27e47a2f55889606af872ce [file] [log] [blame]
Namhyung Kime5a18452012-08-06 13:41:20 +09001#include <fcntl.h>
2#include <stdio.h>
3#include <errno.h>
4#include <string.h>
5#include <unistd.h>
6#include <inttypes.h>
7
8#include "symbol.h"
Stephane Eraniane9c4bcd2015-11-30 10:02:20 +01009#include "demangle-java.h"
David Tolnaycae15db2016-07-09 00:20:00 -070010#include "demangle-rust.h"
Waiman Long8fa7d872014-09-29 16:07:28 -040011#include "machine.h"
Vladimir Nikulichev922d0e42014-04-17 08:27:01 -070012#include "vdso.h"
Arnaldo Carvalho de Meloc506c962013-12-11 09:15:00 -030013#include <symbol/kallsyms.h>
Namhyung Kime5a18452012-08-06 13:41:20 +090014#include "debug.h"
15
David Aherne370a3d2015-02-18 19:33:37 -050016#ifndef EM_AARCH64
17#define EM_AARCH64 183 /* ARM 64 bit */
18#endif
19
Arnaldo Carvalho de Melocc310782016-07-12 11:04:13 -030020typedef Elf64_Nhdr GElf_Nhdr;
David Aherne370a3d2015-02-18 19:33:37 -050021
Arnaldo Carvalho de Meloaaba4e12014-11-24 17:10:52 -030022#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
23extern char *cplus_demangle(const char *, int);
24
25static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
26{
27 return cplus_demangle(c, i);
28}
29#else
30#ifdef NO_DEMANGLE
31static inline char *bfd_demangle(void __maybe_unused *v,
32 const char __maybe_unused *c,
33 int __maybe_unused i)
34{
35 return NULL;
36}
37#else
38#define PACKAGE 'perf'
39#include <bfd.h>
40#endif
41#endif
42
Ingo Molnar89fe8082013-09-30 12:07:11 +020043#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
Arnaldo Carvalho de Melo179f36d2015-09-17 11:30:20 -030044static int elf_getphdrnum(Elf *elf, size_t *dst)
Adrian Huntere955d5c2013-09-13 16:49:30 +030045{
46 GElf_Ehdr gehdr;
47 GElf_Ehdr *ehdr;
48
49 ehdr = gelf_getehdr(elf, &gehdr);
50 if (!ehdr)
51 return -1;
52
53 *dst = ehdr->e_phnum;
54
55 return 0;
56}
57#endif
58
#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
/* Stub for old libelf: report the missing API instead of failing to link. */
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
	pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
	return -1;
}
#endif
66
Namhyung Kime5a18452012-08-06 13:41:20 +090067#ifndef NT_GNU_BUILD_ID
68#define NT_GNU_BUILD_ID 3
69#endif
70
/**
 * elf_symtab__for_each_symbol - iterate thru all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))
82
/* Extract the symbol type (STT_*) bits from st_info. */
static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}
87
Vinson Lee4e310502015-02-09 16:29:37 -080088#ifndef STT_GNU_IFUNC
89#define STT_GNU_IFUNC 10
90#endif
91
Namhyung Kime5a18452012-08-06 13:41:20 +090092static inline int elf_sym__is_function(const GElf_Sym *sym)
93{
Adrian Huntera2f3b6b2014-07-14 13:02:33 +030094 return (elf_sym__type(sym) == STT_FUNC ||
95 elf_sym__type(sym) == STT_GNU_IFUNC) &&
Namhyung Kime5a18452012-08-06 13:41:20 +090096 sym->st_name != 0 &&
97 sym->st_shndx != SHN_UNDEF;
98}
99
100static inline bool elf_sym__is_object(const GElf_Sym *sym)
101{
102 return elf_sym__type(sym) == STT_OBJECT &&
103 sym->st_name != 0 &&
104 sym->st_shndx != SHN_UNDEF;
105}
106
107static inline int elf_sym__is_label(const GElf_Sym *sym)
108{
109 return elf_sym__type(sym) == STT_NOTYPE &&
110 sym->st_name != 0 &&
111 sym->st_shndx != SHN_UNDEF &&
112 sym->st_shndx != SHN_ABS;
113}
114
115static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
116{
117 switch (type) {
118 case MAP__FUNCTION:
119 return elf_sym__is_function(sym);
120 case MAP__VARIABLE:
121 return elf_sym__is_object(sym);
122 default:
123 return false;
124 }
125}
126
/* Name of @sym, looked up in the string table data @symstrs. */
static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}
132
/* Name of section @shdr, looked up in the section string table @secstrs. */
static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}
138
/* Heuristic: any section whose name contains "text" is code. */
static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}
144
/* Heuristic: any section whose name contains "data" is data. */
static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}
150
151static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
152 enum map_type type)
153{
154 switch (type) {
155 case MAP__FUNCTION:
156 return elf_sec__is_text(shdr, secstrs);
157 case MAP__VARIABLE:
158 return elf_sec__is_data(shdr, secstrs);
159 default:
160 return false;
161 }
162}
163
/*
 * Find the index (1-based; section 0 is skipped) of the section whose
 * [sh_addr, sh_addr + sh_size) range contains @addr.
 *
 * NOTE(review): returns -1 cast to size_t (i.e. SIZE_MAX) when no
 * section matches — presumably callers store it into st_shndx-like
 * fields and never hit it on well-formed input; confirm at call sites.
 */
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}
182
Masami Hiramatsu99ca4232014-01-16 09:39:49 +0000183Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
184 GElf_Shdr *shp, const char *name, size_t *idx)
Namhyung Kime5a18452012-08-06 13:41:20 +0900185{
186 Elf_Scn *sec = NULL;
187 size_t cnt = 1;
188
Cody P Schafer49274652012-08-10 15:22:55 -0700189 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
190 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
191 return NULL;
192
Namhyung Kime5a18452012-08-06 13:41:20 +0900193 while ((sec = elf_nextscn(elf, sec)) != NULL) {
194 char *str;
195
196 gelf_getshdr(sec, shp);
197 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
Jiri Olsa155b3a12014-03-02 14:32:07 +0100198 if (str && !strcmp(name, str)) {
Namhyung Kime5a18452012-08-06 13:41:20 +0900199 if (idx)
200 *idx = cnt;
Jiri Olsa155b3a12014-03-02 14:32:07 +0100201 return sec;
Namhyung Kime5a18452012-08-06 13:41:20 +0900202 }
203 ++cnt;
204 }
205
Jiri Olsa155b3a12014-03-02 14:32:07 +0100206 return NULL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900207}
208
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200209static bool want_demangle(bool is_kernel_sym)
210{
211 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
212}
213
214static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
215{
216 int demangle_flags = verbose ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS;
217 char *demangled = NULL;
218
219 /*
220 * We need to figure out if the object was created from C++ sources
221 * DWARF DW_compile_unit has this, but we don't always have access
222 * to it...
223 */
224 if (!want_demangle(dso->kernel || kmodule))
225 return demangled;
226
227 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
228 if (demangled == NULL)
229 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
230 else if (rust_is_mangled(demangled))
231 /*
232 * Input to Rust demangling is the BFD-demangled
233 * name which it Rust-demangles in place.
234 */
235 rust_demangle_sym(demangled);
236
237 return demangled;
238}
239
/* Iterate the REL entries of @reldata; @pos_mem is storage for @pos. */
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

/* Iterate the RELA entries of @reldata; @pos_mem is storage for @pos. */
#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
249
/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, that
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 *
 * Returns the number of synthesized "name@plt" symbols inserted into
 * the dso, or 0 on any problem (logged at debug level).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	/* No dynamic symbol table: nothing to synthesize, not an error. */
	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	/* Prefer RELA-style relocations, fall back to REL-style. */
	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	/* The PLT reloc section must reference the .dynsym we were given. */
	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			const char *elf_name = NULL;
			char *demangled = NULL;
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);

			/* Synthesize one "<name>@plt" symbol per PLT slot. */
			elf_name = elf_sym__name(&sym, symstrs);
			demangled = demangle_sym(dso, 0, elf_name);
			if (demangled != NULL)
				elf_name = demangled;
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_name);
			free(demangled);

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			const char *elf_name = NULL;
			char *demangled = NULL;
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);

			/* Synthesize one "<name>@plt" symbol per PLT slot. */
			elf_name = elf_sym__name(&sym, symstrs);
			demangled = demangle_sym(dso, 0, elf_name);
			if (demangled != NULL)
				elf_name = demangled;
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_name);
			free(demangled);

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}
401
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

/*
 * Scan the note sections of @elf for an NT_GNU_BUILD_ID note and copy
 * its descriptor (the build-id) into @bf, zero-padding up to @size.
 * Returns the descriptor size on success, -1 otherwise.
 */
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	/* Walk the note entries: header, 4-aligned name, 4-aligned desc. */
	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				/* Zero-pad so short ids compare stably. */
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}
485
486int filename__read_build_id(const char *filename, void *bf, size_t size)
487{
488 int fd, err = -1;
489 Elf *elf;
490
491 if (size < BUILD_ID_SIZE)
492 goto out;
493
494 fd = open(filename, O_RDONLY);
495 if (fd < 0)
496 goto out;
497
498 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
499 if (elf == NULL) {
500 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
501 goto out_close;
502 }
503
504 err = elf_read_build_id(elf, bf, size);
505
506 elf_end(elf);
507out_close:
508 close(fd);
509out:
510 return err;
511}
512
513int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
514{
515 int fd, err = -1;
516
517 if (size < BUILD_ID_SIZE)
518 goto out;
519
520 fd = open(filename, O_RDONLY);
521 if (fd < 0)
522 goto out;
523
524 while (1) {
525 char bf[BUFSIZ];
526 GElf_Nhdr nhdr;
527 size_t namesz, descsz;
528
529 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
530 break;
531
532 namesz = NOTE_ALIGN(nhdr.n_namesz);
533 descsz = NOTE_ALIGN(nhdr.n_descsz);
534 if (nhdr.n_type == NT_GNU_BUILD_ID &&
535 nhdr.n_namesz == sizeof("GNU")) {
536 if (read(fd, bf, namesz) != (ssize_t)namesz)
537 break;
538 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
539 size_t sz = min(descsz, size);
540 if (read(fd, build_id, sz) == (ssize_t)sz) {
541 memset(build_id + sz, 0, size - sz);
542 err = 0;
543 break;
544 }
545 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
546 break;
547 } else {
548 int n = namesz + descsz;
549 if (read(fd, bf, n) != n)
550 break;
551 }
552 }
553 close(fd);
554out:
555 return err;
556}
557
558int filename__read_debuglink(const char *filename, char *debuglink,
559 size_t size)
560{
561 int fd, err = -1;
562 Elf *elf;
563 GElf_Ehdr ehdr;
564 GElf_Shdr shdr;
565 Elf_Data *data;
566 Elf_Scn *sec;
567 Elf_Kind ek;
568
569 fd = open(filename, O_RDONLY);
570 if (fd < 0)
571 goto out;
572
573 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
574 if (elf == NULL) {
575 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
576 goto out_close;
577 }
578
579 ek = elf_kind(elf);
580 if (ek != ELF_K_ELF)
Chenggang Qin784f3392013-10-11 08:27:57 +0800581 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900582
583 if (gelf_getehdr(elf, &ehdr) == NULL) {
584 pr_err("%s: cannot get elf header.\n", __func__);
Chenggang Qin784f3392013-10-11 08:27:57 +0800585 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900586 }
587
588 sec = elf_section_by_name(elf, &ehdr, &shdr,
589 ".gnu_debuglink", NULL);
590 if (sec == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800591 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900592
593 data = elf_getdata(sec, NULL);
594 if (data == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800595 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900596
597 /* the start of this section is a zero-terminated string */
598 strncpy(debuglink, data->d_buf, size);
599
Stephane Eranian0d3dc5e2014-02-20 10:32:55 +0900600 err = 0;
601
Chenggang Qin784f3392013-10-11 08:27:57 +0800602out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +0900603 elf_end(elf);
Namhyung Kime5a18452012-08-06 13:41:20 +0900604out_close:
605 close(fd);
606out:
607 return err;
608}
609
610static int dso__swap_init(struct dso *dso, unsigned char eidata)
611{
612 static unsigned int const endian = 1;
613
614 dso->needs_swap = DSO_SWAP__NO;
615
616 switch (eidata) {
617 case ELFDATA2LSB:
618 /* We are big endian, DSO is little endian. */
619 if (*(unsigned char const *)&endian != 1)
620 dso->needs_swap = DSO_SWAP__YES;
621 break;
622
623 case ELFDATA2MSB:
624 /* We are little endian, DSO is big endian. */
625 if (*(unsigned char const *)&endian != 0)
626 dso->needs_swap = DSO_SWAP__YES;
627 break;
628
629 default:
630 pr_err("unrecognized DSO data encoding %d\n", eidata);
631 return -EINVAL;
632 }
633
634 return 0;
635}
636
/*
 * Decompress a compressed kernel module to an anonymous temporary file
 * and return an open fd to it, or -1 on failure (dso->load_errno is
 * set for mkstemp/decompression errors). The caller owns the fd.
 */
static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd = -1;
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
	struct kmod_path m;

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	/* Build-id cache entries carry the real module path in the dso. */
	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		name = dso->long_name;

	/* Only proceed for paths that parse as a compressed module. */
	if (kmod_path__parse_ext(&m, name) || !m.comp)
		return -1;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	/* Unlink now: the file persists until the returned fd is closed. */
	unlink(tmpbuf);

out:
	free(m.ext);
	return fd;
}
673
/* Can this symsrc describe runtime addresses? (.dynsym or .opd present) */
bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}
678
/* Does this symsrc carry a full .symtab (not just .dynsym)? */
bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700683
/* Release everything symsrc__init() acquired: name, ELF handle, fd. */
void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}
690
/*
 * Arch-overridable (weak): do symbol addresses need adjusting for this
 * ELF type? Default: yes for executables and relocatable objects.
 */
bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}
695
/*
 * Open @name and populate @ss with the ELF handle and the sections
 * needed to load symbols on behalf of @dso. Returns 0 on success (the
 * caller must release @ss with symsrc__destroy()); on failure returns
 * -1 with dso->load_errno set.
 */
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	/* Compressed kernel modules get unpacked to a temp file first. */
	if (dso__needs_decompress(dso)) {
		fd = decompress_kmodule(dso, name, type);
		if (fd < 0)
			return -1;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			pr_debug("%s: build id mismatch for %s.\n", __func__, name);
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	/* .symtab must really be a symbol table, not just a same-named section. */
	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
			NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
			&ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	/* .opd: function descriptors (e.g. ppc64 ABIv1). */
	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
			&ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER)
		ss->adjust_symbols = true;
	else
		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}
793
/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found. Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}
807
/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}
824
/* Weak hook for arch-specific symbol fixups; the default does nothing. */
void __weak arch__sym_update(struct symbol *s __maybe_unused,
			     GElf_Sym *sym __maybe_unused) { }
Ananth N Mavinakayanahallic50fc0a2015-04-28 17:35:38 +0530827
Cody P Schafer261360b2012-08-10 15:23:01 -0700828int dso__load_sym(struct dso *dso, struct map *map,
829 struct symsrc *syms_ss, struct symsrc *runtime_ss,
Cody P Schaferd26cd122012-08-10 15:23:00 -0700830 symbol_filter_t filter, int kmodule)
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700831{
832 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
Wang Nanba927322015-04-07 08:22:45 +0000833 struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700834 struct map *curr_map = map;
835 struct dso *curr_dso = dso;
836 Elf_Data *symstrs, *secstrs;
837 uint32_t nr_syms;
838 int err = -1;
839 uint32_t idx;
840 GElf_Ehdr ehdr;
Cody P Schafer261360b2012-08-10 15:23:01 -0700841 GElf_Shdr shdr;
Wang Nan73cdf0c2016-02-26 09:31:49 +0000842 GElf_Shdr tshdr;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700843 Elf_Data *syms, *opddata = NULL;
844 GElf_Sym sym;
Cody P Schafer261360b2012-08-10 15:23:01 -0700845 Elf_Scn *sec, *sec_strndx;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700846 Elf *elf;
847 int nr = 0;
Adrian Hunter39b12f782013-08-07 14:38:47 +0300848 bool remap_kernel = false, adjust_kernel_syms = false;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700849
Wang Nanba927322015-04-07 08:22:45 +0000850 if (kmap && !kmaps)
851 return -1;
852
Cody P Schafer261360b2012-08-10 15:23:01 -0700853 dso->symtab_type = syms_ss->type;
Adrian Hunterc6d8f2a2014-07-14 13:02:41 +0300854 dso->is_64_bit = syms_ss->is_64_bit;
Adrian Hunter0131c4e2013-08-07 14:38:50 +0300855 dso->rel = syms_ss->ehdr.e_type == ET_REL;
856
857 /*
858 * Modules may already have symbols from kallsyms, but those symbols
859 * have the wrong values for the dso maps, so remove them.
860 */
861 if (kmodule && syms_ss->symtab)
862 symbols__delete(&dso->symbols[map->type]);
Cody P Schafer005f9292012-08-10 15:22:58 -0700863
Cody P Schafer261360b2012-08-10 15:23:01 -0700864 if (!syms_ss->symtab) {
Anton Blanchardd0b0d042014-09-09 08:59:29 +1000865 /*
866 * If the vmlinux is stripped, fail so we will fall back
867 * to using kallsyms. The vmlinux runtime symbols aren't
868 * of much use.
869 */
870 if (dso->kernel)
871 goto out_elf_end;
872
Cody P Schafer261360b2012-08-10 15:23:01 -0700873 syms_ss->symtab = syms_ss->dynsym;
874 syms_ss->symshdr = syms_ss->dynshdr;
Cody P Schaferd26cd122012-08-10 15:23:00 -0700875 }
876
Cody P Schafer261360b2012-08-10 15:23:01 -0700877 elf = syms_ss->elf;
878 ehdr = syms_ss->ehdr;
879 sec = syms_ss->symtab;
880 shdr = syms_ss->symshdr;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700881
Anton Blanchard50de1a02016-08-13 11:55:33 +1000882 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
883 ".text", NULL))
Wang Nan73cdf0c2016-02-26 09:31:49 +0000884 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
885
Cody P Schafer261360b2012-08-10 15:23:01 -0700886 if (runtime_ss->opdsec)
887 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
Namhyung Kime5a18452012-08-06 13:41:20 +0900888
889 syms = elf_getdata(sec, NULL);
890 if (syms == NULL)
891 goto out_elf_end;
892
893 sec = elf_getscn(elf, shdr.sh_link);
894 if (sec == NULL)
895 goto out_elf_end;
896
897 symstrs = elf_getdata(sec, NULL);
898 if (symstrs == NULL)
899 goto out_elf_end;
900
Adrian Hunterf247fb82014-07-31 09:00:46 +0300901 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
Namhyung Kime5a18452012-08-06 13:41:20 +0900902 if (sec_strndx == NULL)
903 goto out_elf_end;
904
905 secstrs = elf_getdata(sec_strndx, NULL);
906 if (secstrs == NULL)
907 goto out_elf_end;
908
909 nr_syms = shdr.sh_size / shdr.sh_entsize;
910
911 memset(&sym, 0, sizeof(sym));
Adrian Hunter39b12f782013-08-07 14:38:47 +0300912
913 /*
914 * The kernel relocation symbol is needed in advance in order to adjust
915 * kernel maps correctly.
916 */
917 if (ref_reloc_sym_not_found(kmap)) {
918 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
919 const char *elf_name = elf_sym__name(&sym, symstrs);
920
921 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
922 continue;
923 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
Adrian Hunter91767532014-01-29 16:14:36 +0200924 map->reloc = kmap->ref_reloc_sym->addr -
925 kmap->ref_reloc_sym->unrelocated_addr;
Adrian Hunter39b12f782013-08-07 14:38:47 +0300926 break;
927 }
928 }
929
Adrian Hunterf0ee3b42015-08-14 15:50:06 +0300930 /*
931 * Handle any relocation of vdso necessary because older kernels
932 * attempted to prelink vdso to its virtual address.
933 */
Wang Nan73cdf0c2016-02-26 09:31:49 +0000934 if (dso__is_vdso(dso))
935 map->reloc = map->start - dso->text_offset;
Adrian Hunterf0ee3b42015-08-14 15:50:06 +0300936
Adrian Hunter39b12f782013-08-07 14:38:47 +0300937 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
938 /*
939 * Initial kernel and module mappings do not map to the dso. For
940 * function mappings, flag the fixups.
941 */
942 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
943 remap_kernel = true;
944 adjust_kernel_syms = dso->adjust_symbols;
945 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900946 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
947 struct symbol *f;
948 const char *elf_name = elf_sym__name(&sym, symstrs);
949 char *demangled = NULL;
950 int is_label = elf_sym__is_label(&sym);
951 const char *section_name;
Cody P Schafer261360b2012-08-10 15:23:01 -0700952 bool used_opd = false;
Namhyung Kime5a18452012-08-06 13:41:20 +0900953
Namhyung Kime5a18452012-08-06 13:41:20 +0900954 if (!is_label && !elf_sym__is_a(&sym, map->type))
955 continue;
956
957 /* Reject ARM ELF "mapping symbols": these aren't unique and
958 * don't identify functions, so will confuse the profile
959 * output: */
Victor Kamensky4886f2c2015-01-26 22:34:01 -0800960 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
961 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
962 && (elf_name[2] == '\0' || elf_name[2] == '.'))
Namhyung Kime5a18452012-08-06 13:41:20 +0900963 continue;
964 }
965
Cody P Schafer261360b2012-08-10 15:23:01 -0700966 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
967 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900968 u64 *opd = opddata->d_buf + offset;
969 sym.st_value = DSO__SWAP(dso, u64, *opd);
Cody P Schafer261360b2012-08-10 15:23:01 -0700970 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
971 sym.st_value);
972 used_opd = true;
Namhyung Kime5a18452012-08-06 13:41:20 +0900973 }
Namhyung Kim3843b052012-11-21 13:49:44 +0100974 /*
975 * When loading symbols in a data mapping, ABS symbols (which
976 * has a value of SHN_ABS in its st_shndx) failed at
977 * elf_getscn(). And it marks the loading as a failure so
978 * already loaded symbols cannot be fixed up.
979 *
980 * I'm not sure what should be done. Just ignore them for now.
981 * - Namhyung Kim
982 */
983 if (sym.st_shndx == SHN_ABS)
984 continue;
Namhyung Kime5a18452012-08-06 13:41:20 +0900985
Cody P Schafer261360b2012-08-10 15:23:01 -0700986 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
Namhyung Kime5a18452012-08-06 13:41:20 +0900987 if (!sec)
988 goto out_elf_end;
989
990 gelf_getshdr(sec, &shdr);
991
992 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
993 continue;
994
995 section_name = elf_sec__name(&shdr, secstrs);
996
997 /* On ARM, symbols for thumb functions have 1 added to
998 * the symbol address as a flag - remove it */
999 if ((ehdr.e_machine == EM_ARM) &&
1000 (map->type == MAP__FUNCTION) &&
1001 (sym.st_value & 1))
1002 --sym.st_value;
1003
Adrian Hunter39b12f782013-08-07 14:38:47 +03001004 if (dso->kernel || kmodule) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001005 char dso_name[PATH_MAX];
1006
Adrian Hunter39b12f782013-08-07 14:38:47 +03001007 /* Adjust symbol to map to file offset */
1008 if (adjust_kernel_syms)
1009 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1010
Namhyung Kime5a18452012-08-06 13:41:20 +09001011 if (strcmp(section_name,
1012 (curr_dso->short_name +
1013 dso->short_name_len)) == 0)
1014 goto new_symbol;
1015
1016 if (strcmp(section_name, ".text") == 0) {
Adrian Hunter39b12f782013-08-07 14:38:47 +03001017 /*
1018 * The initial kernel mapping is based on
1019 * kallsyms and identity maps. Overwrite it to
1020 * map to the kernel dso.
1021 */
1022 if (remap_kernel && dso->kernel) {
1023 remap_kernel = false;
1024 map->start = shdr.sh_addr +
1025 ref_reloc(kmap);
1026 map->end = map->start + shdr.sh_size;
1027 map->pgoff = shdr.sh_offset;
1028 map->map_ip = map__map_ip;
1029 map->unmap_ip = map__unmap_ip;
1030 /* Ensure maps are correctly ordered */
Wang Nanba927322015-04-07 08:22:45 +00001031 if (kmaps) {
Arnaldo Carvalho de Melo84c2caf2015-05-25 16:59:56 -03001032 map__get(map);
Wang Nanba927322015-04-07 08:22:45 +00001033 map_groups__remove(kmaps, map);
1034 map_groups__insert(kmaps, map);
Arnaldo Carvalho de Melo84c2caf2015-05-25 16:59:56 -03001035 map__put(map);
Wang Nanba927322015-04-07 08:22:45 +00001036 }
Adrian Hunter39b12f782013-08-07 14:38:47 +03001037 }
1038
Adrian Hunter0131c4e2013-08-07 14:38:50 +03001039 /*
1040 * The initial module mapping is based on
1041 * /proc/modules mapped to offset zero.
1042 * Overwrite it to map to the module dso.
1043 */
1044 if (remap_kernel && kmodule) {
1045 remap_kernel = false;
1046 map->pgoff = shdr.sh_offset;
1047 }
1048
Namhyung Kime5a18452012-08-06 13:41:20 +09001049 curr_map = map;
1050 curr_dso = dso;
1051 goto new_symbol;
1052 }
1053
Adrian Hunter0131c4e2013-08-07 14:38:50 +03001054 if (!kmap)
1055 goto new_symbol;
1056
Namhyung Kime5a18452012-08-06 13:41:20 +09001057 snprintf(dso_name, sizeof(dso_name),
1058 "%s%s", dso->short_name, section_name);
1059
Wang Nanba927322015-04-07 08:22:45 +00001060 curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
Namhyung Kime5a18452012-08-06 13:41:20 +09001061 if (curr_map == NULL) {
1062 u64 start = sym.st_value;
1063
1064 if (kmodule)
1065 start += map->start + shdr.sh_offset;
1066
1067 curr_dso = dso__new(dso_name);
1068 if (curr_dso == NULL)
1069 goto out_elf_end;
1070 curr_dso->kernel = dso->kernel;
1071 curr_dso->long_name = dso->long_name;
1072 curr_dso->long_name_len = dso->long_name_len;
1073 curr_map = map__new2(start, curr_dso,
1074 map->type);
Masami Hiramatsue7a78652015-12-09 11:11:18 +09001075 dso__put(curr_dso);
Namhyung Kime5a18452012-08-06 13:41:20 +09001076 if (curr_map == NULL) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001077 goto out_elf_end;
1078 }
Adrian Hunter39b12f782013-08-07 14:38:47 +03001079 if (adjust_kernel_syms) {
1080 curr_map->start = shdr.sh_addr +
1081 ref_reloc(kmap);
1082 curr_map->end = curr_map->start +
1083 shdr.sh_size;
1084 curr_map->pgoff = shdr.sh_offset;
1085 } else {
1086 curr_map->map_ip = identity__map_ip;
1087 curr_map->unmap_ip = identity__map_ip;
1088 }
Namhyung Kime5a18452012-08-06 13:41:20 +09001089 curr_dso->symtab_type = dso->symtab_type;
Wang Nanba927322015-04-07 08:22:45 +00001090 map_groups__insert(kmaps, curr_map);
Masami Hiramatsue7a78652015-12-09 11:11:18 +09001091 /*
1092 * Add it before we drop the referece to curr_map,
1093 * i.e. while we still are sure to have a reference
1094 * to this DSO via curr_map->dso.
1095 */
1096 dsos__add(&map->groups->machine->dsos, curr_dso);
Masami Hiramatsu8d5c3402015-11-18 15:40:27 +09001097 /* kmaps already got it */
1098 map__put(curr_map);
Namhyung Kime5a18452012-08-06 13:41:20 +09001099 dso__set_loaded(curr_dso, map->type);
1100 } else
1101 curr_dso = curr_map->dso;
1102
1103 goto new_symbol;
1104 }
1105
Cody P Schafer261360b2012-08-10 15:23:01 -07001106 if ((used_opd && runtime_ss->adjust_symbols)
1107 || (!used_opd && syms_ss->adjust_symbols)) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001108 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1109 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1110 (u64)sym.st_value, (u64)shdr.sh_addr,
1111 (u64)shdr.sh_offset);
1112 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1113 }
Avi Kivity950b8352014-01-22 21:58:46 +02001114new_symbol:
Milian Wolff2a8d41b2016-08-30 13:41:02 +02001115 demangled = demangle_sym(dso, kmodule, elf_name);
1116 if (demangled != NULL)
1117 elf_name = demangled;
Namhyung Kime71e7942014-07-31 14:47:42 +09001118
Namhyung Kime5a18452012-08-06 13:41:20 +09001119 f = symbol__new(sym.st_value, sym.st_size,
1120 GELF_ST_BIND(sym.st_info), elf_name);
1121 free(demangled);
1122 if (!f)
1123 goto out_elf_end;
1124
Naveen N. Rao0b3c2262016-04-12 14:40:50 +05301125 arch__sym_update(f, &sym);
1126
Namhyung Kime5a18452012-08-06 13:41:20 +09001127 if (filter && filter(curr_map, f))
1128 symbol__delete(f);
1129 else {
1130 symbols__insert(&curr_dso->symbols[curr_map->type], f);
1131 nr++;
1132 }
1133 }
1134
1135 /*
1136 * For misannotated, zeroed, ASM function sizes.
1137 */
1138 if (nr > 0) {
Namhyung Kim680d9262015-03-06 16:31:27 +09001139 if (!symbol_conf.allow_aliases)
1140 symbols__fixup_duplicate(&dso->symbols[map->type]);
Namhyung Kime5a18452012-08-06 13:41:20 +09001141 symbols__fixup_end(&dso->symbols[map->type]);
1142 if (kmap) {
1143 /*
1144 * We need to fixup this here too because we create new
1145 * maps here, for things like vsyscall sections.
1146 */
Wang Nanba927322015-04-07 08:22:45 +00001147 __map_groups__fixup_end(kmaps, map->type);
Namhyung Kime5a18452012-08-06 13:41:20 +09001148 }
1149 }
1150 err = nr;
1151out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +09001152 return err;
1153}
1154
Adrian Hunter8e0cf962013-08-07 14:38:51 +03001155static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1156{
1157 GElf_Phdr phdr;
1158 size_t i, phdrnum;
1159 int err;
1160 u64 sz;
1161
1162 if (elf_getphdrnum(elf, &phdrnum))
1163 return -1;
1164
1165 for (i = 0; i < phdrnum; i++) {
1166 if (gelf_getphdr(elf, i, &phdr) == NULL)
1167 return -1;
1168 if (phdr.p_type != PT_LOAD)
1169 continue;
1170 if (exe) {
1171 if (!(phdr.p_flags & PF_X))
1172 continue;
1173 } else {
1174 if (!(phdr.p_flags & PF_R))
1175 continue;
1176 }
1177 sz = min(phdr.p_memsz, phdr.p_filesz);
1178 if (!sz)
1179 continue;
1180 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1181 if (err)
1182 return err;
1183 }
1184 return 0;
1185}
1186
1187int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1188 bool *is_64_bit)
1189{
1190 int err;
1191 Elf *elf;
1192
1193 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1194 if (elf == NULL)
1195 return -1;
1196
1197 if (is_64_bit)
1198 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1199
1200 err = elf_read_maps(elf, exe, mapfn, data);
1201
1202 elf_end(elf);
1203 return err;
1204}
1205
Adrian Hunter2b5b8bb2014-07-22 16:17:59 +03001206enum dso_type dso__type_fd(int fd)
1207{
1208 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1209 GElf_Ehdr ehdr;
1210 Elf_Kind ek;
1211 Elf *elf;
1212
1213 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1214 if (elf == NULL)
1215 goto out;
1216
1217 ek = elf_kind(elf);
1218 if (ek != ELF_K_ELF)
1219 goto out_end;
1220
1221 if (gelf_getclass(elf) == ELFCLASS64) {
1222 dso_type = DSO__TYPE_64BIT;
1223 goto out_end;
1224 }
1225
1226 if (gelf_getehdr(elf, &ehdr) == NULL)
1227 goto out_end;
1228
1229 if (ehdr.e_machine == EM_X86_64)
1230 dso_type = DSO__TYPE_X32BIT;
1231 else
1232 dso_type = DSO__TYPE_32BIT;
1233out_end:
1234 elf_end(elf);
1235out:
1236 return dso_type;
1237}
1238
Adrian Hunterafba19d2013-10-09 15:01:12 +03001239static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1240{
1241 ssize_t r;
1242 size_t n;
1243 int err = -1;
1244 char *buf = malloc(page_size);
1245
1246 if (buf == NULL)
1247 return -1;
1248
1249 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1250 goto out;
1251
1252 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1253 goto out;
1254
1255 while (len) {
1256 n = page_size;
1257 if (len < n)
1258 n = len;
1259 /* Use read because mmap won't work on proc files */
1260 r = read(from, buf, n);
1261 if (r < 0)
1262 goto out;
1263 if (!r)
1264 break;
1265 n = r;
1266 r = write(to, buf, n);
1267 if (r < 0)
1268 goto out;
1269 if ((size_t)r != n)
1270 goto out;
1271 len -= n;
1272 }
1273
1274 err = 0;
1275out:
1276 free(buf);
1277 return err;
1278}
1279
/* Handle for reading or writing a kcore-style ELF file. */
struct kcore {
	int fd;			/* underlying file descriptor */
	int elfclass;		/* ELFCLASS32 or ELFCLASS64 */
	Elf *elf;		/* libelf descriptor over fd */
	GElf_Ehdr ehdr;		/* cached ELF header */
};
1286
1287static int kcore__open(struct kcore *kcore, const char *filename)
1288{
1289 GElf_Ehdr *ehdr;
1290
1291 kcore->fd = open(filename, O_RDONLY);
1292 if (kcore->fd == -1)
1293 return -1;
1294
1295 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1296 if (!kcore->elf)
1297 goto out_close;
1298
1299 kcore->elfclass = gelf_getclass(kcore->elf);
1300 if (kcore->elfclass == ELFCLASSNONE)
1301 goto out_end;
1302
1303 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1304 if (!ehdr)
1305 goto out_end;
1306
1307 return 0;
1308
1309out_end:
1310 elf_end(kcore->elf);
1311out_close:
1312 close(kcore->fd);
1313 return -1;
1314}
1315
1316static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1317 bool temp)
1318{
Adrian Hunterafba19d2013-10-09 15:01:12 +03001319 kcore->elfclass = elfclass;
1320
1321 if (temp)
1322 kcore->fd = mkstemp(filename);
1323 else
1324 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1325 if (kcore->fd == -1)
1326 return -1;
1327
1328 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1329 if (!kcore->elf)
1330 goto out_close;
1331
1332 if (!gelf_newehdr(kcore->elf, elfclass))
1333 goto out_end;
1334
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001335 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
Adrian Hunterafba19d2013-10-09 15:01:12 +03001336
1337 return 0;
1338
1339out_end:
1340 elf_end(kcore->elf);
1341out_close:
1342 close(kcore->fd);
1343 unlink(filename);
1344 return -1;
1345}
1346
/* Release the libelf handle and close the underlying descriptor. */
static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}
1352
1353static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1354{
1355 GElf_Ehdr *ehdr = &to->ehdr;
1356 GElf_Ehdr *kehdr = &from->ehdr;
1357
1358 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1359 ehdr->e_type = kehdr->e_type;
1360 ehdr->e_machine = kehdr->e_machine;
1361 ehdr->e_version = kehdr->e_version;
1362 ehdr->e_entry = 0;
1363 ehdr->e_shoff = 0;
1364 ehdr->e_flags = kehdr->e_flags;
1365 ehdr->e_phnum = count;
1366 ehdr->e_shentsize = 0;
1367 ehdr->e_shnum = 0;
1368 ehdr->e_shstrndx = 0;
1369
1370 if (from->elfclass == ELFCLASS32) {
1371 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1372 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1373 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1374 } else {
1375 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1376 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1377 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1378 }
1379
1380 if (!gelf_update_ehdr(to->elf, ehdr))
1381 return -1;
1382
1383 if (!gelf_newphdr(to->elf, count))
1384 return -1;
1385
1386 return 0;
1387}
1388
1389static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1390 u64 addr, u64 len)
1391{
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001392 GElf_Phdr phdr = {
1393 .p_type = PT_LOAD,
1394 .p_flags = PF_R | PF_W | PF_X,
1395 .p_offset = offset,
1396 .p_vaddr = addr,
1397 .p_paddr = 0,
1398 .p_filesz = len,
1399 .p_memsz = len,
1400 .p_align = page_size,
1401 };
Adrian Hunterafba19d2013-10-09 15:01:12 +03001402
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001403 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
Adrian Hunterafba19d2013-10-09 15:01:12 +03001404 return -1;
1405
1406 return 0;
1407}
1408
/* Flush the constructed ELF to disk; returns the file size or -1. */
static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}
1413
/* One PT_LOAD segment to be written into the extracted kcore. */
struct phdr_data {
	off_t offset;	/* file offset of the data in the source kcore */
	u64 addr;	/* virtual address the segment maps */
	u64 len;	/* number of bytes to copy */
};
1419
/*
 * Address ranges gathered from kallsyms and /proc/modules, used to
 * decide which parts of kcore to copy (see kcore_copy__calc_maps()).
 */
struct kcore_copy_info {
	u64 stext;			/* address of _stext, if seen */
	u64 etext;			/* address of _etext, if seen */
	u64 first_symbol;		/* lowest kernel function symbol */
	u64 last_symbol;		/* highest kernel function symbol */
	u64 first_module;		/* lowest module start address */
	u64 last_module_symbol;		/* highest module symbol address */
	struct phdr_data kernel_map;	/* resulting kernel text segment */
	struct phdr_data modules_map;	/* resulting modules segment */
};
1430
1431static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1432 u64 start)
1433{
1434 struct kcore_copy_info *kci = arg;
1435
1436 if (!symbol_type__is_a(type, MAP__FUNCTION))
1437 return 0;
1438
1439 if (strchr(name, '[')) {
1440 if (start > kci->last_module_symbol)
1441 kci->last_module_symbol = start;
1442 return 0;
1443 }
1444
1445 if (!kci->first_symbol || start < kci->first_symbol)
1446 kci->first_symbol = start;
1447
1448 if (!kci->last_symbol || start > kci->last_symbol)
1449 kci->last_symbol = start;
1450
1451 if (!strcmp(name, "_stext")) {
1452 kci->stext = start;
1453 return 0;
1454 }
1455
1456 if (!strcmp(name, "_etext")) {
1457 kci->etext = start;
1458 return 0;
1459 }
1460
1461 return 0;
1462}
1463
1464static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1465 const char *dir)
1466{
1467 char kallsyms_filename[PATH_MAX];
1468
1469 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1470
1471 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1472 return -1;
1473
1474 if (kallsyms__parse(kallsyms_filename, kci,
1475 kcore_copy__process_kallsyms) < 0)
1476 return -1;
1477
1478 return 0;
1479}
1480
1481static int kcore_copy__process_modules(void *arg,
1482 const char *name __maybe_unused,
1483 u64 start)
1484{
1485 struct kcore_copy_info *kci = arg;
1486
1487 if (!kci->first_module || start < kci->first_module)
1488 kci->first_module = start;
1489
1490 return 0;
1491}
1492
1493static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1494 const char *dir)
1495{
1496 char modules_filename[PATH_MAX];
1497
1498 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1499
1500 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1501 return -1;
1502
1503 if (modules__parse(modules_filename, kci,
1504 kcore_copy__process_modules) < 0)
1505 return -1;
1506
1507 return 0;
1508}
1509
1510static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1511 u64 s, u64 e)
1512{
1513 if (p->addr || s < start || s >= end)
1514 return;
1515
1516 p->addr = s;
1517 p->offset = (s - start) + pgoff;
1518 p->len = e < end ? e - s : end - s;
1519}
1520
1521static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1522{
1523 struct kcore_copy_info *kci = data;
1524 u64 end = start + len;
1525
1526 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1527 kci->etext);
1528
1529 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1530 kci->last_module_symbol);
1531
1532 return 0;
1533}
1534
1535static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1536{
1537 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1538 return -1;
1539
1540 return 0;
1541}
1542
/*
 * Work out which two address ranges of kcore to copy: kernel text
 * (_stext.._etext, falling back to the lowest/highest function symbol)
 * and module code.  Ranges are page aligned; the end of a fallback
 * range gets one extra page so the final symbol is hopefully included.
 * Returns -1 when no usable kernel text range was found, or when
 * modules exist but no module symbol was seen.
 */
static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	/* Kernel text start: _stext, or else the lowest function symbol. */
	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	/* Kernel text end: _etext, or else highest symbol + a spare page. */
	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	/* Module end is always a fallback: highest symbol + a spare page. */
	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}
1580
1581static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1582 const char *name)
1583{
1584 char from_filename[PATH_MAX];
1585 char to_filename[PATH_MAX];
1586
1587 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1588 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1589
1590 return copyfile_mode(from_filename, to_filename, 0400);
1591}
1592
1593static int kcore_copy__unlink(const char *dir, const char *name)
1594{
1595 char filename[PATH_MAX];
1596
1597 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1598
1599 return unlink(filename);
1600}
1601
1602static int kcore_copy__compare_fds(int from, int to)
1603{
1604 char *buf_from;
1605 char *buf_to;
1606 ssize_t ret;
1607 size_t len;
1608 int err = -1;
1609
1610 buf_from = malloc(page_size);
1611 buf_to = malloc(page_size);
1612 if (!buf_from || !buf_to)
1613 goto out;
1614
1615 while (1) {
1616 /* Use read because mmap won't work on proc files */
1617 ret = read(from, buf_from, page_size);
1618 if (ret < 0)
1619 goto out;
1620
1621 if (!ret)
1622 break;
1623
1624 len = ret;
1625
1626 if (readn(to, buf_to, len) != (int)len)
1627 goto out;
1628
1629 if (memcmp(buf_from, buf_to, len))
1630 goto out;
1631 }
1632
1633 err = 0;
1634out:
1635 free(buf_to);
1636 free(buf_from);
1637 return err;
1638}
1639
/* Open both paths read-only and compare their contents. */
static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int err = -1;
	int from, to;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to >= 0) {
		err = kcore_copy__compare_fds(from, to);
		close(to);
	}

	close(from);
	return err;
}
1660
1661static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1662 const char *name)
1663{
1664 char from_filename[PATH_MAX];
1665 char to_filename[PATH_MAX];
1666
1667 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1668 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1669
1670 return kcore_copy__compare_files(from_filename, to_filename);
1671}
1672
1673/**
1674 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1675 * @from_dir: from directory
1676 * @to_dir: to directory
1677 *
1678 * This function copies kallsyms, modules and kcore files from one directory to
1679 * another. kallsyms and modules are copied entirely. Only code segments are
1680 * copied from kcore. It is assumed that two segments suffice: one for the
1681 * kernel proper and one for all the modules. The code segments are determined
1682 * from kallsyms and modules files. The kernel map starts at _stext or the
1683 * lowest function symbol, and ends at _etext or the highest function symbol.
1684 * The module map starts at the lowest module address and ends at the highest
1685 * module symbol. Start addresses are rounded down to the nearest page. End
1686 * addresses are rounded up to the nearest page. An extra page is added to the
1687 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1688 * symbol too. Because it contains only code sections, the resulting kcore is
1689 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1690 * is not the same for the kernel map and the modules map. That happens because
1691 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1692 * kallsyms and modules files are compared with their copies to check that
1693 * modules have not been loaded or unloaded while the copies were taking place.
1694 *
1695 * Return: %0 on success, %-1 on failure.
1696 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;	/* kernel segment + modules segment */
	int idx = 0, err = -1;
	/* Segment data starts one page in, leaving room for the headers. */
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	/* Copy kallsyms and modules first; compared again at the end. */
	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	/* Determine the kernel text and modules ranges to extract. */
	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	/* No modules found: emit a single (kernel) segment. */
	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	/* Modules data is laid out directly after the kernel data. */
	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	/* Headers must fit within the first page reserved for them. */
	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	/* Re-compare to detect module load/unload during the copy. */
	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}
1779
/*
 * Extract @kce->len bytes at file offset @kce->offs of @kce->kcore_filename
 * into a temporary single-segment ELF (named via the PERF_KCORE_EXTRACT
 * mkstemp template, recorded in @kce->extract_filename) mapped at
 * @kce->addr.  On failure the temporary file is removed.
 * Returns 0 on success, -1 on failure.
 */
int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;	/* a single PT_LOAD segment */
	int idx = 0, err = -1;
	/* Data starts one page in, leaving room for the headers. */
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	/* Headers must fit within the first page reserved for them. */
	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}
1819
/* Remove the temporary file created by kcore_extract__create(). */
void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}
1824
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03001825#ifdef HAVE_GELF_GETNOTE_SUPPORT
Hemant Kumar060fa0c2016-07-01 17:03:46 +09001826/**
1827 * populate_sdt_note : Parse raw data and identify SDT note
1828 * @elf: elf of the opened file
1829 * @data: raw data of a section with description offset applied
 * @len: note description size
1832 * @sdt_notes: List to add the SDT note
1833 *
1834 * Responsible for parsing the @data in section .note.stapsdt in @elf and
1835 * if its an SDT note, it appends to @sdt_notes list.
1836 */
static int populate_sdt_note(Elf **elf, const char *data, size_t len,
			     struct list_head *sdt_notes)
{
	const char *provider, *name;
	struct sdt_note *tmp = NULL;
	GElf_Ehdr ehdr;
	GElf_Addr base_off = 0;
	GElf_Shdr shdr;
	int ret = -EINVAL;

	/* NR_ADDR address-sized words from the note description. */
	union {
		Elf64_Addr a64[NR_ADDR];
		Elf32_Addr a32[NR_ADDR];
	} buf;

	Elf_Data dst = {
		.d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
		.d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
		.d_off = 0, .d_align = 0
	};
	Elf_Data src = {
		.d_buf = (void *) data, .d_type = ELF_T_ADDR,
		.d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
		.d_align = 0
	};

	tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
	if (!tmp) {
		ret = -ENOMEM;
		goto out_err;
	}

	INIT_LIST_HEAD(&tmp->note_list);

	/*
	 * Must hold the addresses plus at least two NUL-terminated strings
	 * (provider, name) — hence the "+ 3" minimum of three extra bytes.
	 */
	if (len < dst.d_size + 3)
		goto out_free_note;

	/* Translation from file representation to memory representation */
	if (gelf_xlatetom(*elf, &dst, &src,
			  elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
		pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
		goto out_free_note;
	}

	/* Populate the fields of sdt_note */
	provider = data + dst.d_size;

	/* name immediately follows provider's NUL terminator. */
	name = (const char *)memchr(provider, '\0', data + len - provider);
	if (name++ == NULL)
		goto out_free_note;

	tmp->provider = strdup(provider);
	if (!tmp->provider) {
		ret = -ENOMEM;
		goto out_free_note;
	}
	tmp->name = strdup(name);
	if (!tmp->name) {
		ret = -ENOMEM;
		goto out_free_prov;
	}

	/* Keep the three translated addresses in native width. */
	if (gelf_getclass(*elf) == ELFCLASS32) {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
		tmp->bit32 = true;
	} else {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
		tmp->bit32 = false;
	}

	if (!gelf_getehdr(*elf, &ehdr)) {
		pr_debug("%s : cannot get elf header.\n", __func__);
		ret = -EBADF;
		goto out_free_name;
	}

	/* Adjust the prelink effect :
	 * Find out the .stapsdt.base section.
	 * This scn will help us to handle prelinking (if present).
	 * Compare the retrieved file offset of the base section with the
	 * base address in the description of the SDT note. If its different,
	 * then accordingly, adjust the note location.
	 */
	if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL)) {
		base_off = shdr.sh_offset;
		if (base_off) {
			/* addr[0] is the note location, addr[1] its base. */
			if (tmp->bit32)
				tmp->addr.a32[0] = tmp->addr.a32[0] + base_off -
					tmp->addr.a32[1];
			else
				tmp->addr.a64[0] = tmp->addr.a64[0] + base_off -
					tmp->addr.a64[1];
		}
	}

	list_add_tail(&tmp->note_list, sdt_notes);
	return 0;

out_free_name:
	free(tmp->name);
out_free_prov:
	free(tmp->provider);
out_free_note:
	free(tmp);
out_err:
	return ret;
}
1944
1945/**
1946 * construct_sdt_notes_list : constructs a list of SDT notes
1947 * @elf : elf to look into
1948 * @sdt_notes : empty list_head
1949 *
1950 * Scans the sections in 'elf' for the section
1951 * .note.stapsdt. It, then calls populate_sdt_note to find
1952 * out the SDT events and populates the 'sdt_notes'.
1953 */
1954static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
1955{
1956 GElf_Ehdr ehdr;
1957 Elf_Scn *scn = NULL;
1958 Elf_Data *data;
1959 GElf_Shdr shdr;
1960 size_t shstrndx, next;
1961 GElf_Nhdr nhdr;
1962 size_t name_off, desc_off, offset;
1963 int ret = 0;
1964
1965 if (gelf_getehdr(elf, &ehdr) == NULL) {
1966 ret = -EBADF;
1967 goto out_ret;
1968 }
1969 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
1970 ret = -EBADF;
1971 goto out_ret;
1972 }
1973
1974 /* Look for the required section */
1975 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
1976 if (!scn) {
1977 ret = -ENOENT;
1978 goto out_ret;
1979 }
1980
1981 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
1982 ret = -ENOENT;
1983 goto out_ret;
1984 }
1985
1986 data = elf_getdata(scn, NULL);
1987
1988 /* Get the SDT notes */
1989 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
1990 &desc_off)) > 0; offset = next) {
1991 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
1992 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
1993 sizeof(SDT_NOTE_NAME))) {
1994 /* Check the type of the note */
1995 if (nhdr.n_type != SDT_NOTE_TYPE)
1996 goto out_ret;
1997
1998 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
1999 nhdr.n_descsz, sdt_notes);
2000 if (ret < 0)
2001 goto out_ret;
2002 }
2003 }
2004 if (list_empty(sdt_notes))
2005 ret = -ENOENT;
2006
2007out_ret:
2008 return ret;
2009}
2010
2011/**
2012 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2013 * @head : empty list_head
2014 * @target : file to find SDT notes from
2015 *
2016 * This opens the file, initializes
2017 * the ELF and then calls construct_sdt_notes_list.
2018 */
2019int get_sdt_note_list(struct list_head *head, const char *target)
2020{
2021 Elf *elf;
2022 int fd, ret;
2023
2024 fd = open(target, O_RDONLY);
2025 if (fd < 0)
2026 return -EBADF;
2027
2028 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2029 if (!elf) {
2030 ret = -EBADF;
2031 goto out_close;
2032 }
2033 ret = construct_sdt_notes_list(elf, head);
2034 elf_end(elf);
2035out_close:
2036 close(fd);
2037 return ret;
2038}
2039
2040/**
2041 * cleanup_sdt_note_list : free the sdt notes' list
2042 * @sdt_notes: sdt notes' list
2043 *
2044 * Free up the SDT notes in @sdt_notes.
2045 * Returns the number of SDT notes free'd.
2046 */
2047int cleanup_sdt_note_list(struct list_head *sdt_notes)
2048{
2049 struct sdt_note *tmp, *pos;
2050 int nr_free = 0;
2051
2052 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2053 list_del(&pos->note_list);
2054 free(pos->name);
2055 free(pos->provider);
2056 free(pos);
2057 nr_free++;
2058 }
2059 return nr_free;
2060}
2061
2062/**
2063 * sdt_notes__get_count: Counts the number of sdt events
2064 * @start: list_head to sdt_notes list
2065 *
2066 * Returns the number of SDT notes in a list
2067 */
2068int sdt_notes__get_count(struct list_head *start)
2069{
2070 struct sdt_note *sdt_ptr;
2071 int count = 0;
2072
2073 list_for_each_entry(sdt_ptr, start, note_list)
2074 count++;
2075 return count;
2076}
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03002077#endif
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002078
/* One-time libelf setup: declare the ELF version we were built against. */
void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}