// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's tables
 *
 * Added ORC unwind tables sort support and other updates:
 *  Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
 *  Shile Zhang <shile.zhang@linux.alibaba.com>
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recordmcount.c which is:
 *
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 *  Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT	93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA	94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64	183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE	189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2	195
#endif

#ifndef EM_RISCV
#define EM_RISCV	243
#endif

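/*
 * Endian-aware accessors for the mapped image: r/r2/r8 read and
 * w/w2/w8 write 32-, 16- and 64-bit values respectively.  do_file()
 * points them at the little- or big-endian helpers below to match the
 * target's ELF data encoding, so the rest of the code never has to
 * care about byte order.
 */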
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
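
/*
 * Signature of an architecture-specific table sort routine; chosen in
 * do_file() and handed to do_sort_32()/do_sort_64().
 */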
typedef void (*table_sort_t)(char *, int);

/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces.  If successful, then mmap
 * avoids copying unused pieces; else just read the whole file.
 * Open for both read and write.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		/* callers test for NULL, so don't leak MAP_FAILED to them */
		addr = NULL;
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;

out:
	close(fd);
	return addr;
}

static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}

/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

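/* True for reserved st_shndx values other than SHN_XINDEX. */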
static inline int is_shndx_special(unsigned int i)
{
	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}

/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx != SHN_XINDEX)
		return shndx;
	return r(&symtab_shndx_start[sym_offs]);
}

/* 32 bit and 64 bit are very similar */
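/*
 * sorttable.h is included twice: once as-is for the ELFCLASS32 helpers
 * and once with SORTTABLE_64 defined for the ELFCLASS64 ones, which is
 * what provides the do_sort_32() and do_sort_64() calls used below.
 */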
#include "sorttable.h"
#define SORTTABLE_64
#include "sorttable.h"

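/*
 * qsort() comparison callback: order table entries by their first
 * 32-bit field, read in target byte order as a signed offset.
 */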
static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	if (av < bv)
		return -1;
	if (av > bv)
		return 1;
	return 0;
}

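/*
 * Generic sort for exception tables whose entries are two 32-bit
 * self-relative offsets (8 bytes per entry).  Each word is first
 * rebased to be relative to the start of the table so entries can be
 * compared and moved, then converted back to self-relative form.
 */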
static void sort_relative_table(char *extab_image, int image_size)
{
	int i = 0;

	/*
	 * Do the same thing the runtime sort does, first normalize to
	 * being relative to the start of the section.
	 */
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) + i, loc);
		i += 4;
	}

	qsort(extab_image, image_size / 8, 8, compare_relative_table);

	/* Now denormalize. */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) - i, loc);
		i += 4;
	}
}

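/*
 * x86 variant: 12-byte entries made of three 32-bit self-relative
 * fields.  Each field is rebased by its own offset before sorting and
 * restored afterwards so all three stay consistent when entries move.
 */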
static void x86_sort_relative_table(char *extab_image, int image_size)
{
	int i = 0;

	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) + i, loc);
		w(r(loc + 1) + i + 4, loc + 1);
		w(r(loc + 2) + i + 8, loc + 2);

		i += sizeof(uint32_t) * 3;
	}

	qsort(extab_image, image_size / 12, 12, compare_relative_table);

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) - i, loc);
		w(r(loc + 1) - (i + 4), loc + 1);
		w(r(loc + 2) - (i + 8), loc + 2);

		i += sizeof(uint32_t) * 3;
	}
}

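/*
 * s390 variant: 16-byte entries with two 32-bit self-relative fields
 * followed by a 64-bit handler field.  A handler value of 0 is a
 * sentinel and is left untouched, as explained below.
 */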
static void s390_sort_relative_table(char *extab_image, int image_size)
{
	int i;

	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) + i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4));
		/*
		 * 0 is a special self-relative handler value, which means that
		 * handler should be ignored. It is safe, because it means that
		 * handler field points to itself, which should never happen.
		 * When creating extable-relative values, keep it as 0, since
		 * this should never occur either: it would mean that handler
		 * field points to the first extable entry.
		 */
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler += i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}

	qsort(extab_image, image_size / 16, 16, compare_relative_table);

	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) - i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4));
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler -= i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}
}

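/*
 * Validate the ELF header of one mapped file, select the byte-order
 * accessors and any architecture-specific sort routine, then hand the
 * image to do_sort_32() or do_sort_64() according to its ELF class.
 */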
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_X86_64:
		custom_sort = x86_sort_relative_table;
		break;
	case EM_S390:
		custom_sort = s390_sort_relative_table;
		break;
	case EM_AARCH64:
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_RISCV:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
	break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}

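/*
 * Sort each vmlinux image named on the command line in place; exit
 * non-zero if any file could not be mapped or sorted.
 */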
int main(int argc, char *argv[])
{
	int i, n_error = 0;	/* gcc-4.3.0 false positive complaint */
	size_t size = 0;
	void *addr = NULL;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		addr = mmap_file(argv[i], &size);
		if (!addr) {
			++n_error;
			continue;
		}

		if (do_file(argv[i], addr))
			++n_error;

		munmap(addr, size);
	}

	return !!n_error;
}