// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "dso.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "symsrc.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include "debug.h"
#include "util/copyfile.h"
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <symbol/kallsyms.h>
#include <internal/lib.h>

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif

#ifndef ELF32_ST_VISIBILITY
#define ELF32_ST_VISIBILITY(o)	((o) & 0x03)
#endif

/* For ELF64 the definitions are the same.  */
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o)	ELF32_ST_VISIBILITY (o)
#endif

/* How to extract information held in the st_other field.  */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(val)	ELF64_ST_VISIBILITY (val)
#endif

typedef Elf64_Nhdr GElf_Nhdr;

#ifndef DMGL_PARAMS
#define DMGL_NO_OPTS     0              /* For readability... */
#define DMGL_PARAMS      (1 << 0)       /* Include function args */
#define DMGL_ANSI        (1 << 1)       /* Include const, volatile, etc */
#endif

#ifdef HAVE_LIBBFD_SUPPORT
#define PACKAGE 'perf'
#include <bfd.h>
#else
#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#endif
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
	pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
	return -1;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index into @syms
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
{
	return GELF_ST_VISIBILITY(sym->st_other);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS &&
		elf_sym__visibility(sym) != STV_HIDDEN &&
		elf_sym__visibility(sym) != STV_INTERNAL;
}

static bool elf_sym__filter(GElf_Sym *sym)
{
	return elf_sym__is_function(sym) || elf_sym__is_object(sym);
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
{
	return elf_sec__is_text(shdr, secstrs) ||
	       elf_sec__is_data(shdr, secstrs);
}

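/*
 * Walk the section headers and return the 1-based index of the section
 * whose [sh_addr, sh_addr + sh_size) range contains @addr, or -1 when no
 * section covers it.  Used below to resolve .opd entries back to a
 * section index.
 */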
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

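/*
 * Look up a section by name via the section header string table; on
 * success the section header is copied to @shp and, if @idx is non-NULL,
 * the 1-based section index is stored there.
 */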
Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

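/*
 * Try the demanglers in order: bfd/C++ first, then Java if that fails; a
 * successful bfd result that still looks Rust-mangled is additionally
 * demangled in place by the Rust demangler.
 */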
static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
	int demangle_flags = verbose > 0 ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS;
	char *demangled = NULL;

	/*
	 * We need to figure out if the object was created from C++ sources.
	 * DWARF DW_compile_unit has this, but we don't always have access
	 * to it...
	 */
	if (!want_demangle(dso->kernel || kmodule))
		return demangled;

	demangled = bfd_demangle(NULL, elf_name, demangle_flags);
	if (demangled == NULL)
		demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
	else if (rust_is_mangled(demangled))
		/*
		 * Input to Rust demangling is the BFD-demangled
		 * name which it Rust-demangles in place.
		 */
		rust_demangle_sym(demangled);

	return demangled;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, that
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset, plt_header_size, plt_entry_size;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;
	switch (ehdr.e_machine) {
	case EM_ARM:
		plt_header_size = 20;
		plt_entry_size = 12;
		break;

	case EM_AARCH64:
		plt_header_size = 32;
		plt_entry_size = 16;
		break;

	case EM_SPARC:
		plt_header_size = 48;
		plt_entry_size = 12;
		break;

	case EM_SPARCV9:
		plt_header_size = 128;
		plt_entry_size = 32;
		break;

	default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
		plt_header_size = shdr_plt.sh_entsize;
		plt_entry_size = shdr_plt.sh_entsize;
		break;
	}
	plt_offset += plt_header_size;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			const char *elf_name = NULL;
			char *demangled = NULL;
			symidx = GELF_R_SYM(pos->r_info);
			gelf_getsym(syms, symidx, &sym);

			elf_name = elf_sym__name(&sym, symstrs);
			demangled = demangle_sym(dso, 0, elf_name);
			if (demangled != NULL)
				elf_name = demangled;
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_name);
			free(demangled);

			f = symbol__new(plt_offset, plt_entry_size,
					STB_GLOBAL, STT_FUNC, sympltname);
			if (!f)
				goto out_elf_end;

			plt_offset += plt_entry_size;
			symbols__insert(&dso->symbols, f);
			++nr;
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			const char *elf_name = NULL;
			char *demangled = NULL;
			symidx = GELF_R_SYM(pos->r_info);
			gelf_getsym(syms, symidx, &sym);

			elf_name = elf_sym__name(&sym, symstrs);
			demangled = demangle_sym(dso, 0, elf_name);
			if (demangled != NULL)
				elf_name = demangled;
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_name);
			free(demangled);

			f = symbol__new(plt_offset, plt_entry_size,
					STB_GLOBAL, STT_FUNC, sympltname);
			if (!f)
				goto out_elf_end;

			plt_offset += plt_entry_size;
			symbols__insert(&dso->symbols, f);
			++nr;
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
	return demangle_sym(dso, kmodule, elf_name);
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

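/*
 * Scan the note sections (.note.gnu.build-id, .notes or .note) for an
 * NT_GNU_BUILD_ID note owned by "GNU" and copy its descriptor into @bf,
 * returning the descriptor size or -1 if no build-id note is found.
 */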
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

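/*
 * Read the build-id of an on-disk object either via libbfd's cached
 * abfd->build_id (when HAVE_LIBBFD_BUILDID_SUPPORT is set) or by parsing
 * the ELF notes with elf_read_build_id().
 */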
#ifdef HAVE_LIBBFD_BUILDID_SUPPORT

int filename__read_build_id(const char *filename, struct build_id *bid)
{
	size_t size = sizeof(bid->data);
	int err = -1;
	bfd *abfd;

	abfd = bfd_openr(filename, NULL);
	if (!abfd)
		return -1;

	if (!bfd_check_format(abfd, bfd_object)) {
		pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
		goto out_close;
	}

	if (!abfd->build_id || abfd->build_id->size > size)
		goto out_close;

	memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
	memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
	err = bid->size = abfd->build_id->size;

out_close:
	bfd_close(abfd);
	return err;
}

#else // HAVE_LIBBFD_BUILDID_SUPPORT

int filename__read_build_id(const char *filename, struct build_id *bid)
{
	size_t size = sizeof(bid->data);
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bid->data, size);
	if (err > 0)
		bid->size = err;

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

#endif // HAVE_LIBBFD_BUILDID_SUPPORT

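/*
 * Parse a raw note stream such as /sys/kernel/notes, which is not an ELF
 * image, so the notes are read and aligned by hand instead of via libelf.
 */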
int sysfs__read_build_id(const char *filename, struct build_id *bid)
{
	size_t size = sizeof(bid->data);
	int fd, err = -1;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, bid->data, sz) == (ssize_t)sz) {
					memset(bid->data + sz, 0, size - sz);
					bid->size = sz;
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;

			if (n > (int)sizeof(bf)) {
				n = sizeof(bf);
				pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
					 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
			}
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

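/*
 * Fetch the .gnu_debuglink section contents, i.e. the name of the
 * separate debuginfo file associated with @filename, using libbfd when
 * available and plain libelf otherwise.
 */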
#ifdef HAVE_LIBBFD_SUPPORT

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int err = -1;
	asection *section;
	bfd *abfd;

	abfd = bfd_openr(filename, NULL);
	if (!abfd)
		return -1;

	if (!bfd_check_format(abfd, bfd_object)) {
		pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
		goto out_close;
	}

	section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
	if (!section)
		goto out_close;

	if (section->size > size)
		goto out_close;

	if (!bfd_get_section_contents(abfd, section, debuglink, 0,
				      section->size))
		goto out_close;

	err = 0;

out_close:
	bfd_close(abfd);
	return err;
}

#else

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

#endif

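/*
 * Decide whether values read from this DSO need byte swapping by
 * comparing the DSO's EI_DATA encoding with the host endianness.
 */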
static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	/*
	 * Usually vmlinux is an ELF file with type ET_EXEC for most
	 * architectures; the arm64 kernel, however, is linked with the
	 * '-share' option, so we also need to check for type ET_DYN.
	 */
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
	       ehdr.e_type == ET_DYN;
}

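/*
 * Open @name, validate it (ELF header, optional build-id match) and cache
 * the handles and section headers (.symtab, .dynsym, .opd) that the
 * symbol loaders below need.  On failure dso->load_errno is set.
 */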
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso)) {
		fd = dso__decompress_kmodule_fd(dso, name);
		if (fd < 0)
			return -1;

		type = dso->symtab_type;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			pr_debug("%s: build id mismatch for %s.\n", __func__, name);
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
			NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
			&ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
			&ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_SPACE__USER)
		ss->adjust_symbols = true;
	else
		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return -1;
}

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

void __weak arch__sym_update(struct symbol *s __maybe_unused,
		GElf_Sym *sym __maybe_unused) { }

static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
				      GElf_Sym *sym, GElf_Shdr *shdr,
				      struct maps *kmaps, struct kmap *kmap,
				      struct dso **curr_dsop, struct map **curr_mapp,
				      const char *section_name,
				      bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
{
	struct dso *curr_dso = *curr_dsop;
	struct map *curr_map;
	char dso_name[PATH_MAX];

	/* Adjust symbol to map to file offset */
	if (adjust_kernel_syms)
		sym->st_value -= shdr->sh_addr - shdr->sh_offset;

	if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
		return 0;

	if (strcmp(section_name, ".text") == 0) {
		/*
		 * The initial kernel mapping is based on
		 * kallsyms and identity maps.  Overwrite it to
		 * map to the kernel dso.
		 */
		if (*remap_kernel && dso->kernel && !kmodule) {
			*remap_kernel = false;
			map->start = shdr->sh_addr + ref_reloc(kmap);
			map->end = map->start + shdr->sh_size;
			map->pgoff = shdr->sh_offset;
			map->map_ip = map__map_ip;
			map->unmap_ip = map__unmap_ip;
			/* Ensure maps are correctly ordered */
			if (kmaps) {
				map__get(map);
				maps__remove(kmaps, map);
				maps__insert(kmaps, map);
				map__put(map);
			}
		}

		/*
		 * The initial module mapping is based on
		 * /proc/modules mapped to offset zero.
		 * Overwrite it to map to the module dso.
		 */
		if (*remap_kernel && kmodule) {
			*remap_kernel = false;
			map->pgoff = shdr->sh_offset;
		}

		*curr_mapp = map;
		*curr_dsop = dso;
		return 0;
	}

	if (!kmap)
		return 0;

	snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);

	curr_map = maps__find_by_name(kmaps, dso_name);
	if (curr_map == NULL) {
		u64 start = sym->st_value;

		if (kmodule)
			start += map->start + shdr->sh_offset;

		curr_dso = dso__new(dso_name);
		if (curr_dso == NULL)
			return -1;
		curr_dso->kernel = dso->kernel;
		curr_dso->long_name = dso->long_name;
		curr_dso->long_name_len = dso->long_name_len;
		curr_map = map__new2(start, curr_dso);
		dso__put(curr_dso);
		if (curr_map == NULL)
			return -1;

		if (curr_dso->kernel)
			map__kmap(curr_map)->kmaps = kmaps;

		if (adjust_kernel_syms) {
			curr_map->start = shdr->sh_addr + ref_reloc(kmap);
			curr_map->end = curr_map->start + shdr->sh_size;
			curr_map->pgoff = shdr->sh_offset;
		} else {
			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
		}
		curr_dso->symtab_type = dso->symtab_type;
		maps__insert(kmaps, curr_map);
		/*
		 * Add it before we drop the reference to curr_map, i.e. while
		 * we still are sure to have a reference to this DSO via
		 * *curr_map->dso.
		 */
		dsos__add(&kmaps->machine->dsos, curr_dso);
		/* kmaps already got it */
		map__put(curr_map);
		dso__set_loaded(curr_dso);
		*curr_mapp = curr_map;
		*curr_dsop = curr_dso;
	} else
		*curr_dsop = curr_map->dso;

	return 0;
}

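/*
 * Core ELF symbol loader: iterate the symtab (or dynsym fallback) of
 * @syms_ss, adjust symbol values to match the runtime mapping, demangle
 * names and insert the resulting symbols into the dso, returning how many
 * were added or a negative error.
 */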
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
		  struct symsrc *runtime_ss, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	GElf_Shdr tshdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	if (kmap && !kmaps)
		return -1;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
				".text", NULL))
		dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	/*
	 * Handle any relocation of vdso necessary because older kernels
	 * attempted to prelink vdso to its virtual address.
	 */
	if (dso__is_vdso(dso))
		map->reloc = map->start - dso->text_offset;

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.
	 * Flag the fixups.
	 */
	if (dso->kernel) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__filter(&sym))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn().  That marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__filter(&shdr, secstrs))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel) {
			if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
						       section_name, adjust_kernel_syms, kmodule, &remap_kernel))
				goto out_elf_end;
		} else if ((used_opd && runtime_ss->adjust_symbols) ||
			   (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}

		demangled = demangle_sym(dso, kmodule, elf_name);
		if (demangled != NULL)
			elf_name = demangled;

		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info),
				GELF_ST_TYPE(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		arch__sym_update(f, &sym);

		__symbols__insert(&curr_dso->symbols, f, dso->kernel);
		nr++;
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		symbols__fixup_end(&dso->symbols);
		symbols__fixup_duplicate(&dso->symbols);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			maps__fixup_end(kmaps);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

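/*
 * Iterate the PT_LOAD program headers, calling @mapfn for each executable
 * (or, for non-executables, readable) segment with its address, size and
 * file offset.
 */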
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

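/*
 * Copy @len bytes from one fd to another at the given offsets using plain
 * read()/write(), since /proc files such as kcore cannot be mmapped.
 */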
static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

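/*
 * The helpers below implement copying the interesting parts of
 * /proc/kcore (kernel text, module area, entry trampolines) into a
 * smaller ELF file, e.g. for "perf buildid-cache --kcore".
 */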
struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type      = kehdr->e_type;
	ehdr->e_machine   = kehdr->e_machine;
	ehdr->e_version   = kehdr->e_version;
	ehdr->e_entry     = 0;
	ehdr->e_shoff     = 0;
	ehdr->e_flags     = kehdr->e_flags;
	ehdr->e_phnum     = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum     = 0;
	ehdr->e_shstrndx  = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff     = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff     = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr phdr = {
		.p_type		= PT_LOAD,
		.p_flags	= PF_R | PF_W | PF_X,
		.p_offset	= offset,
		.p_vaddr	= addr,
		.p_paddr	= 0,
		.p_filesz	= len,
		.p_memsz	= len,
		.p_align	= page_size,
	};

	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	off_t rel;
	u64 addr;
	u64 len;
	struct list_head node;
	struct phdr_data *remaps;
};

struct sym_data {
	u64 addr;
	struct list_head node;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 first_module_symbol;
	u64 last_module_symbol;
	size_t phnum;
	struct list_head phdrs;
	struct list_head syms;
};

#define kcore_copy__for_each_phdr(k, p) \
	list_for_each_entry((p), &(k)->phdrs, node)

static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
{
	struct phdr_data *p = zalloc(sizeof(*p));

	if (p) {
		p->addr   = addr;
		p->len    = len;
		p->offset = offset;
	}

	return p;
}

static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
						 u64 addr, u64 len,
						 off_t offset)
{
	struct phdr_data *p = phdr_data__new(addr, len, offset);

	if (p)
		list_add_tail(&p->node, &kci->phdrs);

	return p;
}

static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
{
	struct phdr_data *p, *tmp;

	list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
		list_del_init(&p->node);
		free(p);
	}
}

static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
					    u64 addr)
{
	struct sym_data *s = zalloc(sizeof(*s));

	if (s) {
		s->addr = addr;
		list_add_tail(&s->node, &kci->syms);
	}

	return s;
}

static void kcore_copy__free_syms(struct kcore_copy_info *kci)
{
	struct sym_data *s, *tmp;

	list_for_each_entry_safe(s, tmp, &kci->syms, node) {
		list_del_init(&s->node);
		free(s);
	}
}

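/*
 * kallsyms callback: record _stext/_etext, the first and last function
 * symbols, module symbol boundaries and any entry trampoline symbols so
 * the copier knows which address ranges to keep.
 */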
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001606static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1607 u64 start)
1608{
1609 struct kcore_copy_info *kci = arg;
1610
Arnaldo Carvalho de Meloe85e0e02018-04-25 17:16:31 -03001611 if (!kallsyms__is_function(type))
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001612 return 0;
1613
1614 if (strchr(name, '[')) {
Adrian Hunter61f82e32020-05-12 15:19:16 +03001615 if (!kci->first_module_symbol || start < kci->first_module_symbol)
1616 kci->first_module_symbol = start;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001617 if (start > kci->last_module_symbol)
1618 kci->last_module_symbol = start;
1619 return 0;
1620 }
1621
1622 if (!kci->first_symbol || start < kci->first_symbol)
1623 kci->first_symbol = start;
1624
1625 if (!kci->last_symbol || start > kci->last_symbol)
1626 kci->last_symbol = start;
1627
1628 if (!strcmp(name, "_stext")) {
1629 kci->stext = start;
1630 return 0;
1631 }
1632
1633 if (!strcmp(name, "_etext")) {
1634 kci->etext = start;
1635 return 0;
1636 }
1637
Adrian Huntera1a3a062018-05-22 13:54:44 +03001638 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
1639 return -1;
1640
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001641 return 0;
1642}
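
/*
 * Illustrative /proc/kallsyms lines (made-up addresses) showing what the
 * callback above distinguishes: module symbols carry a trailing
 * "[module_name]" tag, which is why strchr(name, '[') is enough to tell
 * them apart from core kernel symbols such as _stext/_etext.
 *
 *	ffffffff81000000 T _stext
 *	ffffffff81a00000 T _etext
 *	ffffffffc0123000 t e1000_probe	[e1000e]
 */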
1643
1644static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1645 const char *dir)
1646{
1647 char kallsyms_filename[PATH_MAX];
1648
1649 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1650
1651 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1652 return -1;
1653
1654 if (kallsyms__parse(kallsyms_filename, kci,
1655 kcore_copy__process_kallsyms) < 0)
1656 return -1;
1657
1658 return 0;
1659}
1660
1661static int kcore_copy__process_modules(void *arg,
1662 const char *name __maybe_unused,
Thomas Richter9ad46522017-08-03 15:49:02 +02001663 u64 start, u64 size __maybe_unused)
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001664{
1665 struct kcore_copy_info *kci = arg;
1666
1667 if (!kci->first_module || start < kci->first_module)
1668 kci->first_module = start;
1669
1670 return 0;
1671}
1672
1673static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1674 const char *dir)
1675{
1676 char modules_filename[PATH_MAX];
1677
1678 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1679
1680 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1681 return -1;
1682
1683 if (modules__parse(modules_filename, kci,
1684 kcore_copy__process_modules) < 0)
1685 return -1;
1686
1687 return 0;
1688}
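
/*
 * For reference, a /proc/modules line looks roughly like this (made-up
 * values):
 *
 *	e1000e 286720 0 - Live 0xffffffffc0230000
 *
 * modules__parse() hands the load address (the last field) to the callback
 * above as 'start', so kci->first_module ends up being the lowest module
 * load address.
 */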
1689
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001690static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
1691 u64 pgoff, u64 s, u64 e)
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001692{
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001693 u64 len, offset;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001694
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001695 if (s < start || s >= end)
1696 return 0;
1697
1698 offset = (s - start) + pgoff;
1699 len = e < end ? e - s : end - s;
1700
1701 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001702}
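
/*
 * Worked example for kcore_copy__map() (illustrative numbers only): if a
 * kcore program header covers start = 0xffffffff81000000 with
 * pgoff = 0x208000, and the wanted range is s = 0xffffffff81200000 .. e
 * (with e inside the segment), then the recorded phdr_data gets:
 *
 *	offset = (s - start) + pgoff = 0x200000 + 0x208000 = 0x408000
 *	len    = e - s (clamped to end - s if e is past the segment end)
 */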
1703
1704static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1705{
1706 struct kcore_copy_info *kci = data;
1707 u64 end = start + len;
Adrian Huntera1a3a062018-05-22 13:54:44 +03001708 struct sym_data *sdat;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001709
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001710 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
1711 return -1;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001712
Adrian Hunterb4503cd2018-05-22 13:54:43 +03001713 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
1714 kci->last_module_symbol))
1715 return -1;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001716
Adrian Huntera1a3a062018-05-22 13:54:44 +03001717 list_for_each_entry(sdat, &kci->syms, node) {
1718 u64 s = round_down(sdat->addr, page_size);
1719
1720 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
1721 return -1;
1722 }
1723
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001724 return 0;
1725}
1726
1727static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1728{
1729 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1730 return -1;
1731
1732 return 0;
1733}
1734
Adrian Hunter22916fd2018-05-22 13:54:45 +03001735static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
1736{
1737 struct phdr_data *p, *k = NULL;
1738 u64 kend;
1739
1740 if (!kci->stext)
1741 return;
1742
1743 /* Find phdr that corresponds to the kernel map (contains stext) */
1744 kcore_copy__for_each_phdr(kci, p) {
1745 u64 pend = p->addr + p->len - 1;
1746
1747 if (p->addr <= kci->stext && pend >= kci->stext) {
1748 k = p;
1749 break;
1750 }
1751 }
1752
1753 if (!k)
1754 return;
1755
1756 kend = k->offset + k->len;
1757
1758 /* Find phdrs that remap the kernel */
1759 kcore_copy__for_each_phdr(kci, p) {
1760 u64 pend = p->offset + p->len;
1761
1762 if (p == k)
1763 continue;
1764
1765 if (p->offset >= k->offset && pend <= kend)
1766 p->remaps = k;
1767 }
1768}
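
/*
 * Note on remaps: /proc/kcore can contain program headers whose file-offset
 * ranges lie entirely inside the kernel-text program header, i.e. they map
 * the same bytes at a different virtual address (for example, the per-cpu
 * entry trampoline mappings introduced with KPTI on x86). Marking them via
 * ->remaps lets the copy below write that data only once while still
 * emitting a program header for every mapping.
 */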
1769
Adrian Hunter15acef62018-05-22 13:54:41 +03001770static void kcore_copy__layout(struct kcore_copy_info *kci)
1771{
1772 struct phdr_data *p;
1773 off_t rel = 0;
1774
Adrian Hunter22916fd2018-05-22 13:54:45 +03001775 kcore_copy__find_remaps(kci);
1776
Adrian Hunter15acef62018-05-22 13:54:41 +03001777 kcore_copy__for_each_phdr(kci, p) {
Adrian Hunter22916fd2018-05-22 13:54:45 +03001778 if (!p->remaps) {
1779 p->rel = rel;
1780 rel += p->len;
1781 }
Adrian Hunter15acef62018-05-22 13:54:41 +03001782 kci->phnum += 1;
1783 }
Adrian Hunter22916fd2018-05-22 13:54:45 +03001784
1785 kcore_copy__for_each_phdr(kci, p) {
1786 struct phdr_data *k = p->remaps;
1787
1788 if (k)
1789 p->rel = p->offset - k->offset + k->rel;
1790 }
Adrian Hunter15acef62018-05-22 13:54:41 +03001791}
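
/*
 * Layout example (illustrative sizes): with a kernel-text phdr of len
 * 0x1000000 and a modules phdr of len 0x200000, the kernel phdr gets
 * rel = 0 and the modules phdr rel = 0x1000000, while a trampoline phdr
 * that remaps kernel data at k->offset + 0x2000 gets rel = 0x2000, i.e. it
 * points back into the single copy of the kernel data.
 */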
1792
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001793static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1794 Elf *elf)
1795{
1796 if (kcore_copy__parse_kallsyms(kci, dir))
1797 return -1;
1798
1799 if (kcore_copy__parse_modules(kci, dir))
1800 return -1;
1801
1802 if (kci->stext)
1803 kci->stext = round_down(kci->stext, page_size);
1804 else
1805 kci->stext = round_down(kci->first_symbol, page_size);
1806
1807 if (kci->etext) {
1808 kci->etext = round_up(kci->etext, page_size);
1809 } else if (kci->last_symbol) {
1810 kci->etext = round_up(kci->last_symbol, page_size);
1811 kci->etext += page_size;
1812 }
1813
Adrian Hunter61f82e32020-05-12 15:19:16 +03001814 if (kci->first_module_symbol &&
1815 (!kci->first_module || kci->first_module_symbol < kci->first_module))
1816 kci->first_module = kci->first_module_symbol;
1817
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001818 kci->first_module = round_down(kci->first_module, page_size);
1819
1820 if (kci->last_module_symbol) {
1821 kci->last_module_symbol = round_up(kci->last_module_symbol,
1822 page_size);
1823 kci->last_module_symbol += page_size;
1824 }
1825
1826 if (!kci->stext || !kci->etext)
1827 return -1;
1828
1829 if (kci->first_module && !kci->last_module_symbol)
1830 return -1;
1831
Adrian Hunter15acef62018-05-22 13:54:41 +03001832 if (kcore_copy__read_maps(kci, elf))
1833 return -1;
1834
1835 kcore_copy__layout(kci);
1836
1837 return 0;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001838}
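
/*
 * Example of the resulting boundaries (made-up addresses): with
 * _stext = 0xffffffff81000123 and _etext = 0xffffffff81a00456, the kernel
 * map becomes 0xffffffff81000000 .. 0xffffffff81a01000; the module map runs
 * from round_down(first_module) to round_up(last_module_symbol) plus one
 * extra page, mirroring the rounding done above.
 */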
1839
1840static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1841 const char *name)
1842{
1843 char from_filename[PATH_MAX];
1844 char to_filename[PATH_MAX];
1845
1846 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1847 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1848
1849 return copyfile_mode(from_filename, to_filename, 0400);
1850}
1851
1852static int kcore_copy__unlink(const char *dir, const char *name)
1853{
1854 char filename[PATH_MAX];
1855
1856 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1857
1858 return unlink(filename);
1859}
1860
1861static int kcore_copy__compare_fds(int from, int to)
1862{
1863 char *buf_from;
1864 char *buf_to;
1865 ssize_t ret;
1866 size_t len;
1867 int err = -1;
1868
1869 buf_from = malloc(page_size);
1870 buf_to = malloc(page_size);
1871 if (!buf_from || !buf_to)
1872 goto out;
1873
1874 while (1) {
1875 /* Use read because mmap won't work on proc files */
1876 ret = read(from, buf_from, page_size);
1877 if (ret < 0)
1878 goto out;
1879
1880 if (!ret)
1881 break;
1882
1883 len = ret;
1884
1885 if (readn(to, buf_to, len) != (int)len)
1886 goto out;
1887
1888 if (memcmp(buf_from, buf_to, len))
1889 goto out;
1890 }
1891
1892 err = 0;
1893out:
1894 free(buf_to);
1895 free(buf_from);
1896 return err;
1897}
1898
1899static int kcore_copy__compare_files(const char *from_filename,
1900 const char *to_filename)
1901{
1902 int from, to, err = -1;
1903
1904 from = open(from_filename, O_RDONLY);
1905 if (from < 0)
1906 return -1;
1907
1908 to = open(to_filename, O_RDONLY);
1909 if (to < 0)
1910 goto out_close_from;
1911
1912 err = kcore_copy__compare_fds(from, to);
1913
1914 close(to);
1915out_close_from:
1916 close(from);
1917 return err;
1918}
1919
1920static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1921 const char *name)
1922{
1923 char from_filename[PATH_MAX];
1924 char to_filename[PATH_MAX];
1925
1926 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1927 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1928
1929 return kcore_copy__compare_files(from_filename, to_filename);
1930}
1931
1932/**
1933 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1934 * @from_dir: from directory
1935 * @to_dir: to directory
1936 *
1937 * This function copies kallsyms, modules and kcore files from one directory to
1938 * another. kallsyms and modules are copied entirely. Only code segments are
1939 * copied from kcore. It is assumed that two segments suffice: one for the
1940 * kernel proper and one for all the modules. The code segments are determined
1941 * from kallsyms and modules files. The kernel map starts at _stext or the
1942 * lowest function symbol, and ends at _etext or the highest function symbol.
1943 * The module map starts at the lowest module address and ends at the highest
1944 * module symbol. Start addresses are rounded down to the nearest page. End
1945 * addresses are rounded up to the nearest page. An extra page is added to the
1946 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1947 * symbol too. Because it contains only code sections, the resulting kcore is
1948 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1949 * is not the same for the kernel map and the modules map. That happens because
1950 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1951 * kallsyms and modules files are compared with their copies to check that
1952 * modules have not been loaded or unloaded while the copies were taking place.
1953 *
1954 * Return: %0 on success, %-1 on failure.
1955 */
1956int kcore_copy(const char *from_dir, const char *to_dir)
1957{
1958 struct kcore kcore;
1959 struct kcore extract;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001960 int idx = 0, err = -1;
Adrian Hunterd2c95982018-05-22 13:54:42 +03001961 off_t offset, sz;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001962 struct kcore_copy_info kci = { .stext = 0, };
1963 char kcore_filename[PATH_MAX];
1964 char extract_filename[PATH_MAX];
Adrian Hunterd2c95982018-05-22 13:54:42 +03001965 struct phdr_data *p;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001966
Adrian Hunterf6838202018-05-22 13:54:38 +03001967 INIT_LIST_HEAD(&kci.phdrs);
Adrian Huntera1a3a062018-05-22 13:54:44 +03001968 INIT_LIST_HEAD(&kci.syms);
Adrian Hunterf6838202018-05-22 13:54:38 +03001969
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001970 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1971 return -1;
1972
1973 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1974 goto out_unlink_kallsyms;
1975
1976 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1977 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1978
1979 if (kcore__open(&kcore, kcore_filename))
1980 goto out_unlink_modules;
1981
1982 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1983 goto out_kcore_close;
1984
1985 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1986 goto out_kcore_close;
1987
Adrian Hunter6e979572018-05-22 13:54:39 +03001988 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001989 goto out_extract_close;
1990
Adrian Hunterc9dd1d82018-05-22 13:54:40 +03001991 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
1992 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
1993 offset = round_up(offset, page_size);
1994
Adrian Hunterd2c95982018-05-22 13:54:42 +03001995 kcore_copy__for_each_phdr(&kci, p) {
1996 off_t offs = p->rel + offset;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001997
Adrian Hunterd2c95982018-05-22 13:54:42 +03001998 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001999 goto out_extract_close;
2000 }
2001
2002 sz = kcore__write(&extract);
2003 if (sz < 0 || sz > offset)
2004 goto out_extract_close;
2005
Adrian Hunterd2c95982018-05-22 13:54:42 +03002006 kcore_copy__for_each_phdr(&kci, p) {
2007 off_t offs = p->rel + offset;
Adrian Hunterfc1b6912013-10-14 16:57:29 +03002008
Adrian Hunter22916fd2018-05-22 13:54:45 +03002009 if (p->remaps)
2010 continue;
Adrian Hunterd2c95982018-05-22 13:54:42 +03002011 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
2012 goto out_extract_close;
2013 }
Adrian Hunterfc1b6912013-10-14 16:57:29 +03002014
2015 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
2016 goto out_extract_close;
2017
2018 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
2019 goto out_extract_close;
2020
2021 err = 0;
2022
2023out_extract_close:
2024 kcore__close(&extract);
2025 if (err)
2026 unlink(extract_filename);
2027out_kcore_close:
2028 kcore__close(&kcore);
2029out_unlink_modules:
2030 if (err)
2031 kcore_copy__unlink(to_dir, "modules");
2032out_unlink_kallsyms:
2033 if (err)
2034 kcore_copy__unlink(to_dir, "kallsyms");
2035
Adrian Hunterb4503cd2018-05-22 13:54:43 +03002036 kcore_copy__free_phdrs(&kci);
Adrian Huntera1a3a062018-05-22 13:54:44 +03002037 kcore_copy__free_syms(&kci);
Adrian Hunterb4503cd2018-05-22 13:54:43 +03002038
Adrian Hunterfc1b6912013-10-14 16:57:29 +03002039 return err;
2040}
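
/*
 * A minimal usage sketch (hypothetical destination path): on a live system
 * the source directory is typically /proc, since that is where kallsyms,
 * modules and kcore all live, and the destination is a private directory
 * such as the one 'perf buildid-cache --kcore' prepares:
 *
 *	if (kcore_copy("/proc", "/tmp/kcore_dir"))
 *		pr_debug("kcore copy failed\n");
 */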
2041
Adrian Hunterafba19d2013-10-09 15:01:12 +03002042int kcore_extract__create(struct kcore_extract *kce)
2043{
2044 struct kcore kcore;
2045 struct kcore extract;
2046 size_t count = 1;
2047 int idx = 0, err = -1;
2048 off_t offset = page_size, sz;
2049
2050 if (kcore__open(&kcore, kce->kcore_filename))
2051 return -1;
2052
2053 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
2054 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
2055 goto out_kcore_close;
2056
2057 if (kcore__copy_hdr(&kcore, &extract, count))
2058 goto out_extract_close;
2059
2060 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
2061 goto out_extract_close;
2062
2063 sz = kcore__write(&extract);
2064 if (sz < 0 || sz > offset)
2065 goto out_extract_close;
2066
2067 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
2068 goto out_extract_close;
2069
2070 err = 0;
2071
2072out_extract_close:
2073 kcore__close(&extract);
2074 if (err)
2075 unlink(kce->extract_filename);
2076out_kcore_close:
2077 kcore__close(&kcore);
2078
2079 return err;
2080}
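
/*
 * Sketch of how a caller might use kcore_extract__create() (fields as used
 * above; the addresses are placeholders): extract one code range from kcore
 * into a small, ordinary ELF file that tools like objdump can read, then
 * delete it when done:
 *
 *	struct kcore_extract kce = {
 *		.kcore_filename = "/proc/kcore",
 *		.addr = sym_start,	// map address of the code
 *		.offs = file_offs,	// offset within kcore
 *		.len  = sym_len,
 *	};
 *
 *	if (!kcore_extract__create(&kce)) {
 *		// ... disassemble kce.extract_filename ...
 *		kcore_extract__delete(&kce);
 *	}
 */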
2081
2082void kcore_extract__delete(struct kcore_extract *kce)
2083{
2084 unlink(kce->extract_filename);
2085}
2086
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03002087#ifdef HAVE_GELF_GETNOTE_SUPPORT
Ravi Bangoria5a5e3d32018-08-20 10:12:50 +05302088
2089static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
2090{
2091 if (!base_off)
2092 return;
2093
2094 if (tmp->bit32)
2095 tmp->addr.a32[SDT_NOTE_IDX_LOC] =
2096 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
2097 tmp->addr.a32[SDT_NOTE_IDX_BASE];
2098 else
2099 tmp->addr.a64[SDT_NOTE_IDX_LOC] =
2100 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
2101 tmp->addr.a64[SDT_NOTE_IDX_BASE];
2102}
2103
2104static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
2105 GElf_Addr base_off)
2106{
2107 if (!base_off)
2108 return;
2109
2110 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
2111 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2112 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
2113 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2114}
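
/*
 * Worked example for sdt_adjust_loc() (illustrative values): if the note
 * records loc = 0x6000 and base = 0x8000, but the .stapsdt.base section is
 * actually found at file offset base_off = 0x7000, the probe location is
 * rewritten to 0x6000 + 0x7000 - 0x8000 = 0x5000, i.e. shifted by however
 * far prelink moved the base section.
 */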
2115
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002116/**
2117 * populate_sdt_note : Parse raw data and identify SDT note
2118 * @elf: elf of the opened file
2119 * @data: raw data of a section with description offset applied
2120 * @len: note description size
2121 * @type: type of the note
2122 * @sdt_notes: List to add the SDT note
2123 *
2124 * Responsible for parsing the @data in the .note.stapsdt section of @elf
2125 * and, if it is an SDT note, appending it to the @sdt_notes list.
2126 */
2127static int populate_sdt_note(Elf **elf, const char *data, size_t len,
2128 struct list_head *sdt_notes)
2129{
Alexis Berlemontbe881842016-12-14 01:07:31 +01002130 const char *provider, *name, *args;
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002131 struct sdt_note *tmp = NULL;
2132 GElf_Ehdr ehdr;
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002133 GElf_Shdr shdr;
2134 int ret = -EINVAL;
2135
2136 union {
2137 Elf64_Addr a64[NR_ADDR];
2138 Elf32_Addr a32[NR_ADDR];
2139 } buf;
2140
2141 Elf_Data dst = {
2142 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
2143 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
2144 .d_off = 0, .d_align = 0
2145 };
2146 Elf_Data src = {
2147 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
2148 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
2149 .d_align = 0
2150 };
2151
2152 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
2153 if (!tmp) {
2154 ret = -ENOMEM;
2155 goto out_err;
2156 }
2157
2158 INIT_LIST_HEAD(&tmp->note_list);
2159
2160 if (len < dst.d_size + 3)
2161 goto out_free_note;
2162
2163 /* Translation from file representation to memory representation */
2164 if (gelf_xlatetom(*elf, &dst, &src,
2165 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
2166 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
2167 goto out_free_note;
2168 }
2169
2170 /* Populate the fields of sdt_note */
2171 provider = data + dst.d_size;
2172
2173 name = (const char *)memchr(provider, '\0', data + len - provider);
2174 if (name++ == NULL)
2175 goto out_free_note;
2176
2177 tmp->provider = strdup(provider);
2178 if (!tmp->provider) {
2179 ret = -ENOMEM;
2180 goto out_free_note;
2181 }
2182 tmp->name = strdup(name);
2183 if (!tmp->name) {
2184 ret = -ENOMEM;
2185 goto out_free_prov;
2186 }
2187
Alexis Berlemontbe881842016-12-14 01:07:31 +01002188 args = memchr(name, '\0', data + len - name);
2189
2190 /*
2191 * There is no argument if:
2192 * - We reached the end of the note;
2193 * - There is not enough room to hold a potential string;
2194 * - The argument string is empty or just contains ':'.
2195 */
2196 if (args == NULL || data + len - args < 2 ||
2197 args[1] == ':' || args[1] == '\0')
2198 tmp->args = NULL;
2199 else {
2200 tmp->args = strdup(++args);
2201 if (!tmp->args) {
2202 ret = -ENOMEM;
2203 goto out_free_name;
2204 }
2205 }
2206
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002207 if (gelf_getclass(*elf) == ELFCLASS32) {
2208 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
2209 tmp->bit32 = true;
2210 } else {
2211 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
2212 tmp->bit32 = false;
2213 }
2214
2215 if (!gelf_getehdr(*elf, &ehdr)) {
2216 pr_debug("%s : cannot get elf header.\n", __func__);
2217 ret = -EBADF;
Alexis Berlemontbe881842016-12-14 01:07:31 +01002218 goto out_free_args;
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002219 }
2220
2221	/* Adjust for the prelink effect:
2222	 * Find the .stapsdt.base section; it is needed to handle
2223	 * prelinking (if present).
2224	 * Compare the retrieved file offset of the base section with the
2225	 * base address recorded in the SDT note description. If they
2226	 * differ, adjust the note location accordingly.
2227	 */
Ravi Bangoria5a5e3d32018-08-20 10:12:50 +05302228 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
2229 sdt_adjust_loc(tmp, shdr.sh_offset);
2230
2231 /* Adjust reference counter offset */
2232 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
2233 sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002234
2235 list_add_tail(&tmp->note_list, sdt_notes);
2236 return 0;
2237
Alexis Berlemontbe881842016-12-14 01:07:31 +01002238out_free_args:
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03002239 zfree(&tmp->args);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002240out_free_name:
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03002241 zfree(&tmp->name);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002242out_free_prov:
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03002243 zfree(&tmp->provider);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002244out_free_note:
2245 free(tmp);
2246out_err:
2247 return ret;
2248}
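
/*
 * For reference, the descriptor of a systemtap SDT note (name "stapsdt",
 * type 3) that the function above parses is laid out as:
 *
 *	location addr | base addr | semaphore addr	(NR_ADDR addresses)
 *	"provider\0"  "probe_name\0"  "argument_format\0"
 *
 * e.g. provider "myapp", name "my_probe", args "-4@%edi 8@%rsi"
 * (hypothetical probe; the argument format is size@location pairs).
 */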
2249
2250/**
2251 * construct_sdt_notes_list : constructs a list of SDT notes
2252 * @elf : elf to look into
2253 * @sdt_notes : empty list_head
2254 *
2255 * Scans the sections in 'elf' for the .note.stapsdt section,
2256 * then calls populate_sdt_note() to find the SDT events and
2257 * populate the 'sdt_notes' list.
2258 */
2259static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
2260{
2261 GElf_Ehdr ehdr;
2262 Elf_Scn *scn = NULL;
2263 Elf_Data *data;
2264 GElf_Shdr shdr;
2265 size_t shstrndx, next;
2266 GElf_Nhdr nhdr;
2267 size_t name_off, desc_off, offset;
2268 int ret = 0;
2269
2270 if (gelf_getehdr(elf, &ehdr) == NULL) {
2271 ret = -EBADF;
2272 goto out_ret;
2273 }
2274 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
2275 ret = -EBADF;
2276 goto out_ret;
2277 }
2278
2279 /* Look for the required section */
2280 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
2281 if (!scn) {
2282 ret = -ENOENT;
2283 goto out_ret;
2284 }
2285
2286 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
2287 ret = -ENOENT;
2288 goto out_ret;
2289 }
2290
2291 data = elf_getdata(scn, NULL);
2292
2293 /* Get the SDT notes */
2294 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
2295 &desc_off)) > 0; offset = next) {
2296 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
2297 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
2298 sizeof(SDT_NOTE_NAME))) {
2299 /* Check the type of the note */
2300 if (nhdr.n_type != SDT_NOTE_TYPE)
2301 goto out_ret;
2302
2303 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
2304 nhdr.n_descsz, sdt_notes);
2305 if (ret < 0)
2306 goto out_ret;
2307 }
2308 }
2309 if (list_empty(sdt_notes))
2310 ret = -ENOENT;
2311
2312out_ret:
2313 return ret;
2314}
2315
2316/**
2317 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2318 * @head : empty list_head
2319 * @target : file to find SDT notes from
2320 *
2321 * This opens the file, initializes an ELF handle
2322 * and then calls construct_sdt_notes_list().
2323 */
2324int get_sdt_note_list(struct list_head *head, const char *target)
2325{
2326 Elf *elf;
2327 int fd, ret;
2328
2329 fd = open(target, O_RDONLY);
2330 if (fd < 0)
2331 return -EBADF;
2332
2333 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2334 if (!elf) {
2335 ret = -EBADF;
2336 goto out_close;
2337 }
2338 ret = construct_sdt_notes_list(elf, head);
2339 elf_end(elf);
2340out_close:
2341 close(fd);
2342 return ret;
2343}
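
/*
 * A minimal sketch of tying these helpers together (error handling elided;
 * "./a.out" is just a placeholder path):
 *
 *	LIST_HEAD(sdt_notes);
 *	struct sdt_note *pos;
 *
 *	if (!get_sdt_note_list(&sdt_notes, "./a.out")) {
 *		list_for_each_entry(pos, &sdt_notes, note_list)
 *			pr_debug("%s:%s\n", pos->provider, pos->name);
 *		cleanup_sdt_note_list(&sdt_notes);
 *	}
 */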
2344
2345/**
2346 * cleanup_sdt_note_list : free the sdt notes' list
2347 * @sdt_notes: sdt notes' list
2348 *
2349 * Free up the SDT notes in @sdt_notes.
2350 * Returns the number of SDT notes freed.
2351 */
2352int cleanup_sdt_note_list(struct list_head *sdt_notes)
2353{
2354 struct sdt_note *tmp, *pos;
2355 int nr_free = 0;
2356
2357 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
Arnaldo Carvalho de Meloe56fbc92019-07-04 12:13:46 -03002358 list_del_init(&pos->note_list);
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03002359 zfree(&pos->name);
2360 zfree(&pos->provider);
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002361 free(pos);
2362 nr_free++;
2363 }
2364 return nr_free;
2365}
2366
2367/**
2368 * sdt_notes__get_count: Counts the number of sdt events
2369 * @start: list_head to sdt_notes list
2370 *
2371 * Returns the number of SDT notes in a list
2372 */
2373int sdt_notes__get_count(struct list_head *start)
2374{
2375 struct sdt_note *sdt_ptr;
2376 int count = 0;
2377
2378 list_for_each_entry(sdt_ptr, start, note_list)
2379 count++;
2380 return count;
2381}
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03002382#endif
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002383
Namhyung Kime5a18452012-08-06 13:41:20 +09002384void symbol__elf_init(void)
2385{
2386 elf_version(EV_CURRENT);
2387}