// SPDX-License-Identifier: GPL-2.0-only
/*
 * recordmcount.c: construct a table of the locations of calls to 'mcount'
 * so that ftrace can find them quickly.
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the .o file in-place.
 *
 * Append a new STRTAB that has the new section names, followed by a new array
 * ElfXX_Shdr[] that has the new section headers, followed by the section
 * contents for __mcount_loc and its relocations. The old shstrtab strings,
 * and the old ElfXX_Shdr[] array, remain as "garbage" (commonly, a couple
 * kilobytes.) Subsequent processing by /bin/ld (or the kernel module loader)
 * will ignore the garbage regions, because they are not designated by the
 * new .e_shoff nor the new ElfXX_Shdr[]. [In order to remove the garbage,
 * then use "ld -r" to create a new file that omits the garbage.]
 */
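/*
 * Resulting file layout, roughly (illustrative sketch of the strategy
 * described above, not exact offsets):
 *
 *   [ original ELF contents ............................. ]  unchanged
 *   [ old .shstrtab strings, old ElfXX_Shdr[] ............ ]  now "garbage"
 *   [ new .shstrtab strings ]                                 appended
 *   [ new ElfXX_Shdr[] (the new e_shoff points here) ]        appended
 *   [ __mcount_loc contents and its relocations ]             appended
 */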

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifndef EM_AARCH64
#define EM_AARCH64	183
#define R_AARCH64_NONE	0
#define R_AARCH64_ABS64	257
#endif

#define R_ARM_PC24	1
#define R_ARM_THM_CALL	10
#define R_ARM_CALL	28

#define R_AARCH64_CALL26	283

static int fd_map;	/* File descriptor for file being modified. */
static int mmap_failed;	/* Boolean flag. */
static char gpfx;	/* prefix for global symbol name (sometimes '_') */
static struct stat sb;	/* Remember .st_size, etc. */
static const char *altmcount;	/* alternate mcount symbol name */
static int warn_on_notrace_sect;	/* warn when section has mcount not being recorded */
static void *file_map;	/* pointer of the mapped file */
static void *file_end;	/* pointer to the end of the mapped file */
static int file_updated;	/* flag to state file was changed */
static void *file_ptr;	/* current file pointer location */

static void *file_append;	/* added to the end of the file */
static size_t file_append_size;	/* how much is added to end of file */

/* Per-file resource cleanup when multiple files. */
static void file_append_cleanup(void)
{
	free(file_append);
	file_append = NULL;
	file_append_size = 0;
	file_updated = 0;
}

static void mmap_cleanup(void)
{
	if (!mmap_failed)
		munmap(file_map, sb.st_size);
	else
		free(file_map);
	file_map = NULL;
}

/* ulseek, uwrite, ...: Check return value for errors. */

static off_t ulseek(off_t const offset, int const whence)
{
	switch (whence) {
	case SEEK_SET:
		file_ptr = file_map + offset;
		break;
	case SEEK_CUR:
		file_ptr += offset;
		break;
	case SEEK_END:
		file_ptr = file_map + (sb.st_size - offset);
		break;
	}
	if (file_ptr < file_map) {
		fprintf(stderr, "lseek: seek before file\n");
		return -1;
	}
	return file_ptr - file_map;
}
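/*
 * Note: ulseek() only moves the in-memory cursor (file_ptr) over the
 * mapped image; nothing touches the file itself. Seeking past file_end
 * is allowed -- a later uwrite() at such a position lands in the append
 * buffer (see uwrite() below). With SEEK_END, the offset is interpreted
 * as a distance back from the end of the mapped image.
 */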

static ssize_t uwrite(void const *const buf, size_t const count)
{
	size_t cnt = count;
	off_t idx = 0;

	file_updated = 1;

	if (file_ptr + count >= file_end) {
		off_t aoffset = (file_ptr + count) - file_end;

		if (aoffset > file_append_size) {
			file_append = realloc(file_append, aoffset);
			file_append_size = aoffset;
		}
		if (!file_append) {
			perror("write");
			file_append_cleanup();
			mmap_cleanup();
			return -1;
		}
		if (file_ptr < file_end) {
			cnt = file_end - file_ptr;
		} else {
			cnt = 0;
			idx = aoffset - count;
		}
	}

	if (cnt)
		memcpy(file_ptr, buf, cnt);

	if (cnt < count)
		memcpy(file_append + idx, buf + cnt, count - cnt);

	file_ptr += count;
	return count;
}
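/*
 * uwrite() behaves like write() against the in-memory image: bytes that
 * fit inside the original mapping are copied in place, and anything
 * extending past file_end is grown into file_append/file_append_size.
 * Both pieces are flushed to disk later by write_file().
 */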
138
Matt Helsley3aec8632019-07-31 11:24:13 -0700139static void * umalloc(size_t size)
John Reiser81d38582010-10-13 15:12:54 -0400140{
141 void *const addr = malloc(size);
Steven Rostedtdd5477f2011-04-06 13:21:17 -0400142 if (addr == 0) {
John Reiser81d38582010-10-13 15:12:54 -0400143 fprintf(stderr, "malloc failed: %zu bytes\n", size);
Matt Helsley4fbcf072019-07-31 11:24:16 -0700144 file_append_cleanup();
145 mmap_cleanup();
Matt Helsley3f1df122019-07-31 11:24:12 -0700146 return NULL;
John Reiser81d38582010-10-13 15:12:54 -0400147 }
148 return addr;
149}
150
Matt Helsley4fbcf072019-07-31 11:24:16 -0700151/*
152 * Get the whole file as a programming convenience in order to avoid
153 * malloc+lseek+read+free of many pieces. If successful, then mmap
154 * avoids copying unused pieces; else just read the whole file.
155 * Open for both read and write; new info will be appended to the file.
156 * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
157 * do not propagate to the file until an explicit overwrite at the last.
158 * This preserves most aspects of consistency (all except .st_size)
159 * for simultaneous readers of the file while we are appending to it.
160 * However, multiple writers still are bad. We choose not to use
161 * locking because it is expensive and the use case of kernel build
162 * makes multiple writers unlikely.
163 */
164static void *mmap_file(char const *fname)
165{
166 /* Avoid problems if early cleanup() */
167 fd_map = -1;
168 mmap_failed = 1;
169 file_map = NULL;
170 file_ptr = NULL;
171 file_updated = 0;
172 sb.st_size = 0;
173
174 fd_map = open(fname, O_RDONLY);
175 if (fd_map < 0) {
176 perror(fname);
177 return NULL;
178 }
179 if (fstat(fd_map, &sb) < 0) {
180 perror(fname);
181 goto out;
182 }
183 if (!S_ISREG(sb.st_mode)) {
184 fprintf(stderr, "not a regular file: %s\n", fname);
185 goto out;
186 }
187 file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
188 fd_map, 0);
189 if (file_map == MAP_FAILED) {
190 mmap_failed = 1;
191 file_map = umalloc(sb.st_size);
192 if (!file_map) {
193 perror(fname);
194 goto out;
195 }
196 if (read(fd_map, file_map, sb.st_size) != sb.st_size) {
197 perror(fname);
198 free(file_map);
199 file_map = NULL;
200 goto out;
201 }
202 } else
203 mmap_failed = 0;
204out:
205 close(fd_map);
206 fd_map = -1;
207
208 file_end = file_map + sb.st_size;
209
210 return file_map;
211}
212
213
Steven Rostedtffd618f2011-04-08 03:58:48 -0400214static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
215static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
216static unsigned char *ideal_nop;
217
218static char rel_type_nop;
219
220static int (*make_nop)(void *map, size_t const offset);
221
222static int make_nop_x86(void *map, size_t const offset)
223{
224 uint32_t *ptr;
225 unsigned char *op;
226
227 /* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */
228 ptr = map + offset;
229 if (*ptr != 0)
230 return -1;
231
232 op = map + offset - 1;
233 if (*op != 0xe8)
234 return -1;
235
236 /* convert to nop */
Matt Helsley3f1df122019-07-31 11:24:12 -0700237 if (ulseek(offset - 1, SEEK_SET) < 0)
238 return -1;
239 if (uwrite(ideal_nop, 5) < 0)
240 return -1;
Steven Rostedtffd618f2011-04-08 03:58:48 -0400241 return 0;
242}
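/*
 * Before linking, a compiler-emitted "call mcount" on x86 is
 * e8 00 00 00 00 (the call opcode plus a zero displacement that the
 * relocation would later fill in). The offset passed in points at the
 * displacement, so the opcode byte sits at offset - 1 and the whole
 * 5-byte call is overwritten with ideal_nop5_x86_32/_64.
 */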

static unsigned char ideal_nop4_arm_le[4] = { 0x00, 0x00, 0xa0, 0xe1 }; /* mov r0, r0 */
static unsigned char ideal_nop4_arm_be[4] = { 0xe1, 0xa0, 0x00, 0x00 }; /* mov r0, r0 */
static unsigned char *ideal_nop4_arm;

static unsigned char bl_mcount_arm_le[4] = { 0xfe, 0xff, 0xff, 0xeb }; /* bl */
static unsigned char bl_mcount_arm_be[4] = { 0xeb, 0xff, 0xff, 0xfe }; /* bl */
static unsigned char *bl_mcount_arm;

static unsigned char push_arm_le[4] = { 0x04, 0xe0, 0x2d, 0xe5 }; /* push {lr} */
static unsigned char push_arm_be[4] = { 0xe5, 0x2d, 0xe0, 0x04 }; /* push {lr} */
static unsigned char *push_arm;

static unsigned char ideal_nop2_thumb_le[2] = { 0x00, 0xbf }; /* nop */
static unsigned char ideal_nop2_thumb_be[2] = { 0xbf, 0x00 }; /* nop */
static unsigned char *ideal_nop2_thumb;

static unsigned char push_bl_mcount_thumb_le[6] = { 0x00, 0xb5, 0xff, 0xf7, 0xfe, 0xff }; /* push {lr}, bl */
static unsigned char push_bl_mcount_thumb_be[6] = { 0xb5, 0x00, 0xf7, 0xff, 0xff, 0xfe }; /* push {lr}, bl */
static unsigned char *push_bl_mcount_thumb;

static int make_nop_arm(void *map, size_t const offset)
{
	char *ptr;
	int cnt = 1;
	int nop_size;
	size_t off = offset;

	ptr = map + offset;
	if (memcmp(ptr, bl_mcount_arm, 4) == 0) {
		if (memcmp(ptr - 4, push_arm, 4) == 0) {
			off -= 4;
			cnt = 2;
		}
		ideal_nop = ideal_nop4_arm;
		nop_size = 4;
	} else if (memcmp(ptr - 2, push_bl_mcount_thumb, 6) == 0) {
		cnt = 3;
		nop_size = 2;
		off -= 2;
		ideal_nop = ideal_nop2_thumb;
	} else
		return -1;

	/* Convert to nop */
	if (ulseek(off, SEEK_SET) < 0)
		return -1;

	do {
		if (uwrite(ideal_nop, nop_size) < 0)
			return -1;
	} while (--cnt > 0);

	return 0;
}
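/*
 * make_nop_arm() handles three shapes of mcount call site:
 *   - a lone ARM "bl mcount"               -> one 4-byte nop
 *   - "push {lr}" followed by "bl mcount"  -> two 4-byte nops
 *   - Thumb "push {lr}; bl mcount" pair    -> three 2-byte nops
 * The starting offset is backed up (off -= 4 or off -= 2) so the
 * preceding push is overwritten as well.
 */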

static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
static int make_nop_arm64(void *map, size_t const offset)
{
	uint32_t *ptr;

	ptr = map + offset;
	/* bl <_mcount> is 0x94000000 before relocation */
	if (*ptr != 0x94000000)
		return -1;

	/* Convert to nop */
	if (ulseek(offset, SEEK_SET) < 0)
		return -1;
	if (uwrite(ideal_nop, 4) < 0)
		return -1;
	return 0;
}
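/*
 * On arm64 an un-relocated "bl <_mcount>" is the bare opcode 0x94000000
 * (the branch immediate is still zero). It is replaced with a single
 * 4-byte nop; ideal_nop4_arm64 above is the little-endian byte sequence
 * of the AArch64 NOP instruction 0xd503201f.
 */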

static int write_file(const char *fname)
{
	char tmp_file[strlen(fname) + 4];
	size_t n;

	if (!file_updated)
		return 0;

	sprintf(tmp_file, "%s.rc", fname);

	/*
	 * After reading the entire file into memory, delete it
	 * and write it back, to prevent weird side effects of modifying
	 * an object file in place.
	 */
	fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode);
	if (fd_map < 0) {
		perror(fname);
		return -1;
	}
	n = write(fd_map, file_map, sb.st_size);
	if (n != sb.st_size) {
		perror("write");
		close(fd_map);
		return -1;
	}
	if (file_append_size) {
		n = write(fd_map, file_append, file_append_size);
		if (n != file_append_size) {
			perror("write");
			close(fd_map);
			return -1;
		}
	}
	close(fd_map);
	if (rename(tmp_file, fname) < 0) {
		perror(fname);
		return -1;
	}
	return 0;
}
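/*
 * write_file() writes the (possibly modified) image plus any appended
 * data to "<fname>.rc" and then rename()s that over the original, so a
 * failure part-way through never leaves a truncated object file behind.
 * If nothing was changed (file_updated == 0) it is a no-op.
 */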

/* w8rev, w8nat, ...: Handle endianness. */

static uint64_t w8rev(uint64_t const x)
{
	return ((0xff & (x >> (0 * 8))) << (7 * 8))
	       | ((0xff & (x >> (1 * 8))) << (6 * 8))
	       | ((0xff & (x >> (2 * 8))) << (5 * 8))
	       | ((0xff & (x >> (3 * 8))) << (4 * 8))
	       | ((0xff & (x >> (4 * 8))) << (3 * 8))
	       | ((0xff & (x >> (5 * 8))) << (2 * 8))
	       | ((0xff & (x >> (6 * 8))) << (1 * 8))
	       | ((0xff & (x >> (7 * 8))) << (0 * 8));
}

static uint32_t w4rev(uint32_t const x)
{
	return ((0xff & (x >> (0 * 8))) << (3 * 8))
	       | ((0xff & (x >> (1 * 8))) << (2 * 8))
	       | ((0xff & (x >> (2 * 8))) << (1 * 8))
	       | ((0xff & (x >> (3 * 8))) << (0 * 8));
}

static uint32_t w2rev(uint16_t const x)
{
	return ((0xff & (x >> (0 * 8))) << (1 * 8))
	       | ((0xff & (x >> (1 * 8))) << (0 * 8));
}

static uint64_t w8nat(uint64_t const x)
{
	return x;
}

static uint32_t w4nat(uint32_t const x)
{
	return x;
}

static uint32_t w2nat(uint16_t const x)
{
	return x;
}

static uint64_t (*w8)(uint64_t);
static uint32_t (*w)(uint32_t);
static uint32_t (*w2)(uint16_t);
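/*
 * w8/w/w2 convert 64/32/16-bit ELF fields between the object file's
 * byte order and the host's. do_file() points them at the "nat" or
 * "rev" variants after inspecting e_ident[EI_DATA], so the rest of the
 * code can read and write header fields without caring about endianness.
 */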

/* Names of the sections that could contain calls to mcount. */
static int is_mcounted_section_name(char const *const txtname)
{
	return strncmp(".text", txtname, 5) == 0 ||
		strcmp(".init.text", txtname) == 0 ||
		strcmp(".ref.text", txtname) == 0 ||
		strcmp(".sched.text", txtname) == 0 ||
		strcmp(".spinlock.text", txtname) == 0 ||
		strcmp(".irqentry.text", txtname) == 0 ||
		strcmp(".softirqentry.text", txtname) == 0 ||
		strcmp(".kprobes.text", txtname) == 0 ||
		strcmp(".cpuidle.text", txtname) == 0;
}
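/*
 * Note that ".text" is matched with strncmp(), so any section whose name
 * merely starts with ".text" (e.g. ".text.unlikely") qualifies, while the
 * remaining names must match exactly.
 */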

static char const *already_has_rel_mcount = "success"; /* our work here is done! */

/* 32 bit and 64 bit are very similar */
#include "recordmcount.h"
#define RECORD_MCOUNT_64
#include "recordmcount.h"
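/*
 * recordmcount.h is deliberately included twice: the first pass expands
 * to the 32-bit handlers and, with RECORD_MCOUNT_64 defined, the second
 * pass expands to the 64-bit ones (do32()/do64() and the related hooks
 * such as is_fake_mcount32/64 used below).
 */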

static int arm_is_fake_mcount(Elf32_Rel const *rp)
{
	switch (ELF32_R_TYPE(w(rp->r_info))) {
	case R_ARM_THM_CALL:
	case R_ARM_CALL:
	case R_ARM_PC24:
		return 0;
	}

	return 1;
}

static int arm64_is_fake_mcount(Elf64_Rel const *rp)
{
	return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26;
}
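/*
 * "Fake" mcount references are relocations that mention the mcount symbol
 * without being an actual call instruction: on ARM anything other than
 * R_ARM_CALL/R_ARM_THM_CALL/R_ARM_PC24, on arm64 anything other than
 * R_AARCH64_CALL26. Such sites are skipped rather than recorded in
 * __mcount_loc (the filtering itself lives in recordmcount.h).
 */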

/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
 * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
 * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
 * to imply the order of the members; the spec does not say so.
 *	typedef unsigned char Elf64_Byte;
 * fails on MIPS64 because their <elf.h> already has it!
 */

typedef uint8_t myElf64_Byte;		/* Type for a 8-bit quantity. */

union mips_r_info {
	Elf64_Xword r_info;
	struct {
		Elf64_Word r_sym;		/* Symbol index. */
		myElf64_Byte r_ssym;		/* Special symbol. */
		myElf64_Byte r_type3;		/* Third relocation. */
		myElf64_Byte r_type2;		/* Second relocation. */
		myElf64_Byte r_type;		/* First relocation. */
	} r_mips;
};

static uint64_t MIPS64_r_sym(Elf64_Rel const *rp)
{
	return w(((union mips_r_info){ .r_info = rp->r_info }).r_mips.r_sym);
}

static void MIPS64_r_info(Elf64_Rel *const rp, unsigned sym, unsigned type)
{
	rp->r_info = ((union mips_r_info){
		.r_mips = { .r_sym = w(sym), .r_type = type }
	}).r_info;
}
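/*
 * These two helpers replace the generic r_info accessors when the object
 * is 64-bit MIPS (see do_file() below, which installs them as
 * Elf64_r_sym/Elf64_r_info), repacking r_info through the union so the
 * odd MIPS64 field layout is honoured.
 */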

static int do_file(char const *const fname)
{
	unsigned int reltype = 0;
	Elf32_Ehdr *ehdr;
	int rc = -1;

	ehdr = mmap_file(fname);
	if (!ehdr)
		goto out;

	w = w4nat;
	w2 = w2nat;
	w8 = w8nat;
	switch (ehdr->e_ident[EI_DATA]) {
		static unsigned int const endian = 1;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		goto out;
	case ELFDATA2LSB:
		if (*(unsigned char const *)&endian != 1) {
			/* main() is big endian, file.o is little endian. */
			w = w4rev;
			w2 = w2rev;
			w8 = w8rev;
		}
		ideal_nop4_arm = ideal_nop4_arm_le;
		bl_mcount_arm = bl_mcount_arm_le;
		push_arm = push_arm_le;
		ideal_nop2_thumb = ideal_nop2_thumb_le;
		push_bl_mcount_thumb = push_bl_mcount_thumb_le;
		break;
	case ELFDATA2MSB:
		if (*(unsigned char const *)&endian != 0) {
			/* main() is little endian, file.o is big endian. */
			w = w4rev;
			w2 = w2rev;
			w8 = w8rev;
		}
		ideal_nop4_arm = ideal_nop4_arm_be;
		bl_mcount_arm = bl_mcount_arm_be;
		push_arm = push_arm_be;
		ideal_nop2_thumb = ideal_nop2_thumb_be;
		push_bl_mcount_thumb = push_bl_mcount_thumb_be;
		break;
	}	/* end switch */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    w2(ehdr->e_type) != ET_REL ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_REL file %s\n", fname);
		goto out;
	}

	gpfx = '_';
	switch (w2(ehdr->e_machine)) {
	default:
		fprintf(stderr, "unrecognized e_machine %u %s\n",
			w2(ehdr->e_machine), fname);
		goto out;
	case EM_386:
		reltype = R_386_32;
		rel_type_nop = R_386_NONE;
		make_nop = make_nop_x86;
		ideal_nop = ideal_nop5_x86_32;
		mcount_adjust_32 = -1;
		gpfx = 0;
		break;
	case EM_ARM:
		reltype = R_ARM_ABS32;
		altmcount = "__gnu_mcount_nc";
		make_nop = make_nop_arm;
		rel_type_nop = R_ARM_NONE;
		is_fake_mcount32 = arm_is_fake_mcount;
		gpfx = 0;
		break;
	case EM_AARCH64:
		reltype = R_AARCH64_ABS64;
		make_nop = make_nop_arm64;
		rel_type_nop = R_AARCH64_NONE;
		ideal_nop = ideal_nop4_arm64;
		is_fake_mcount64 = arm64_is_fake_mcount;
		break;
	case EM_IA_64:	reltype = R_IA64_IMM64; break;
	case EM_MIPS:	/* reltype: e_class */ break;
	case EM_PPC:	reltype = R_PPC_ADDR32; break;
	case EM_PPC64:	reltype = R_PPC64_ADDR64; break;
	case EM_S390:	/* reltype: e_class */ break;
	case EM_SH:	reltype = R_SH_DIR32; gpfx = 0; break;
	case EM_SPARCV9: reltype = R_SPARC_64; break;
	case EM_X86_64:
		make_nop = make_nop_x86;
		ideal_nop = ideal_nop5_x86_64;
		reltype = R_X86_64_64;
		rel_type_nop = R_X86_64_NONE;
		mcount_adjust_64 = -1;
		gpfx = 0;
		break;
	}	/* end switch */

	switch (ehdr->e_ident[EI_CLASS]) {
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		goto out;
	case ELFCLASS32:
		if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
		||  w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_REL file: %s\n", fname);
			goto out;
		}
		if (w2(ehdr->e_machine) == EM_MIPS) {
			reltype = R_MIPS_32;
			is_fake_mcount32 = MIPS32_is_fake_mcount;
		}
		if (do32(ehdr, fname, reltype) < 0)
			goto out;
		break;
	case ELFCLASS64: {
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
		||  w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_REL file: %s\n", fname);
			goto out;
		}
		if (w2(ghdr->e_machine) == EM_S390) {
			reltype = R_390_64;
			mcount_adjust_64 = -14;
		}
		if (w2(ghdr->e_machine) == EM_MIPS) {
			reltype = R_MIPS_64;
			Elf64_r_sym = MIPS64_r_sym;
			Elf64_r_info = MIPS64_r_info;
			is_fake_mcount64 = MIPS64_is_fake_mcount;
		}
		if (do64(ghdr, fname, reltype) < 0)
			goto out;
		break;
	}
	}	/* end switch */

	rc = write_file(fname);
out:
	file_append_cleanup();
	mmap_cleanup();
	return rc;
}

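/*
 * Usage: recordmcount [-w] file.o...
 * -w warns about sections that contain mcount calls which end up not being
 * recorded (warn_on_notrace_sect). Each object file is modified in place,
 * and the exit status is non-zero if any file failed.
 */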
int main(int argc, char *argv[])
{
	const char ftrace[] = "/ftrace.o";
	int ftrace_size = sizeof(ftrace) - 1;
	int n_error = 0;	/* gcc-4.3.0 false positive complaint */
	int c;
	int i;

	while ((c = getopt(argc, argv, "w")) >= 0) {
		switch (c) {
		case 'w':
			warn_on_notrace_sect = 1;
			break;
		default:
			fprintf(stderr, "usage: recordmcount [-w] file.o...\n");
			return 0;
		}
	}

	if ((argc - optind) < 1) {
		fprintf(stderr, "usage: recordmcount [-w] file.o...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = optind; i < argc; i++) {
		char *file = argv[i];
		int len;

		/*
		 * The file kernel/trace/ftrace.o references the mcount
		 * function but does not call it. Since ftrace.o should
		 * not be traced anyway, we just skip it.
		 */
		len = strlen(file);
		if (len >= ftrace_size &&
		    strcmp(file + (len - ftrace_size), ftrace) == 0)
			continue;

		if (do_file(file)) {
			fprintf(stderr, "%s: failed\n", file);
			++n_error;
		}
	}
	return !!n_error;
}