blob: b7c2ad71f9cfab5f540027257165f94762855df7 [file] [log] [blame]
Thomas Gleixner4317cf952019-05-31 01:09:38 -07001// SPDX-License-Identifier: GPL-2.0-only
David Daneya79f2482012-04-19 14:59:55 -07002/*
Shile Zhang10916702019-12-04 08:46:31 +08003 * sorttable.c: Sort the kernel's table
David Daneya79f2482012-04-19 14:59:55 -07004 *
Shile Zhang57fa1892019-12-04 08:46:32 +08005 * Added ORC unwind tables sort support and other updates:
6 * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
7 * Shile Zhang <shile.zhang@linux.alibaba.com>
8 *
David Daneyd59a1682012-04-24 11:23:14 -07009 * Copyright 2011 - 2012 Cavium, Inc.
David Daneya79f2482012-04-19 14:59:55 -070010 *
11 * Based on code taken from recortmcount.c which is:
12 *
13 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
David Daneya79f2482012-04-19 14:59:55 -070014 *
15 * Restructured to fit Linux format, as well as other updates:
Shile Zhang57fa1892019-12-04 08:46:32 +080016 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
David Daneya79f2482012-04-19 14:59:55 -070017 */
18
19/*
20 * Strategy: alter the vmlinux file in-place.
21 */
22
23#include <sys/types.h>
24#include <sys/mman.h>
25#include <sys/stat.h>
26#include <getopt.h>
27#include <elf.h>
28#include <fcntl.h>
David Daneya79f2482012-04-19 14:59:55 -070029#include <stdio.h>
30#include <stdlib.h>
31#include <string.h>
32#include <unistd.h>
33
David Daneyd59a1682012-04-24 11:23:14 -070034#include <tools/be_byteshift.h>
35#include <tools/le_byteshift.h>
36
/*
 * Fallback e_machine numbers for toolchains whose <elf.h> predates
 * these architectures; values match the official ELF assignments.
 */
#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT 93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA 94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE 189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2 195
#endif

#ifndef EM_RISCV
#define EM_RISCV 243
#endif
/*
 * Unaligned read/write helpers, bound in do_file() to the big- or
 * little-endian variants below according to the ELF file's EI_DATA:
 * r/w for 32-bit, r2/w2 for 16-bit, r8/w8 for 64-bit values.
 */
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
/* Signature of the per-arch exception-table sort routines: (image, size). */
typedef void (*table_sort_t)(char *, int);
68
/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces.  If successful, then mmap
 * avoids copying unused pieces; else just read the whole file.
 * Open for both read and write.
 *
 * On success returns the writable, file-backed mapping and stores the
 * file length in *size; on any failure returns NULL (*size untouched).
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		/*
		 * Report failure as NULL: callers test "if (!addr)", and
		 * MAP_FAILED is (void *)-1, which would wrongly pass that
		 * check and later be dereferenced.
		 */
		addr = NULL;
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;

out:
	/* The mapping (if any) remains valid after the fd is closed. */
	close(fd);
	return addr;
}
107
/*
 * Concrete endianness accessors: unaligned big-endian (*be) and
 * little-endian (*le) loads and stores of 32-, 16- and 64-bit values,
 * built on the tools/{be,le}_byteshift.h helpers.  do_file() selects
 * one family and assigns it to the r/r2/r8/w/w2/w8 pointers above.
 */
static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}
David Daneya79f2482012-04-19 14:59:55 -0700167
/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

/*
 * A section index is "special" when it lies in the reserved range,
 * except for SHN_XINDEX, which signals an extended-index lookup.
 */
static inline int is_shndx_special(unsigned int i)
{
	if (i == SHN_XINDEX)
		return 0;
	return i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}
179
/*
 * Accessor for sym->st_shndx, hides ugliness of "64k sections":
 * reserved indices are remapped via SPECIAL(), SHN_XINDEX redirects
 * to the SHT_SYMTAB_SHNDX table, everything else is returned as-is.
 */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx == SHN_XINDEX)
		return r(&symtab_shndx_start[sym_offs]);
	return shndx;
}
191
/* 32 bit and 64 bit are very similar */
/*
 * sorttable.h is a template: the first inclusion generates the 32-bit
 * do_sort_32(); re-including it with SORTTABLE_64 defined generates
 * the matching 64-bit do_sort_64().
 */
#include "sorttable.h"
#define SORTTABLE_64
#include "sorttable.h"
David Daneya79f2482012-04-19 14:59:55 -0700196
/*
 * qsort() comparator: order extable entries by their (normalized)
 * signed 32-bit first word, read with the endian-aware accessor.
 */
static int compare_relative_table(const void *a, const void *b)
{
	int32_t left = (int32_t)r(a);
	int32_t right = (int32_t)r(b);

	return (left > right) - (left < right);
}
208
Shile Zhang6402e142019-12-04 08:46:28 +0800209static void sort_relative_table(char *extab_image, int image_size)
210{
211 int i = 0;
212
213 /*
214 * Do the same thing the runtime sort does, first normalize to
215 * being relative to the start of the section.
216 */
217 while (i < image_size) {
218 uint32_t *loc = (uint32_t *)(extab_image + i);
219 w(r(loc) + i, loc);
220 i += 4;
221 }
222
223 qsort(extab_image, image_size / 8, 8, compare_relative_table);
224
225 /* Now denormalize. */
226 i = 0;
227 while (i < image_size) {
228 uint32_t *loc = (uint32_t *)(extab_image + i);
229 w(r(loc) - i, loc);
230 i += 4;
231 }
232}
233
Mark Rutlandd6e2cc52021-10-19 17:02:16 +0100234static void arm64_sort_relative_table(char *extab_image, int image_size)
235{
236 int i = 0;
237
238 while (i < image_size) {
239 uint32_t *loc = (uint32_t *)(extab_image + i);
240
241 w(r(loc) + i, loc);
242 w(r(loc + 1) + i + 4, loc + 1);
243 /* Don't touch the fixup type or data */
244
245 i += sizeof(uint32_t) * 3;
246 }
247
248 qsort(extab_image, image_size / 12, 12, compare_relative_table);
249
250 i = 0;
251 while (i < image_size) {
252 uint32_t *loc = (uint32_t *)(extab_image + i);
253
254 w(r(loc) - i, loc);
255 w(r(loc + 1) - (i + 4), loc + 1);
256 /* Don't touch the fixup type or data */
257
258 i += sizeof(uint32_t) * 3;
259 }
260}
261
Tony Luck548acf12016-02-17 10:20:12 -0800262static void x86_sort_relative_table(char *extab_image, int image_size)
263{
Shile Zhang6402e142019-12-04 08:46:28 +0800264 int i = 0;
Tony Luck548acf12016-02-17 10:20:12 -0800265
Tony Luck548acf12016-02-17 10:20:12 -0800266 while (i < image_size) {
267 uint32_t *loc = (uint32_t *)(extab_image + i);
268
269 w(r(loc) + i, loc);
270 w(r(loc + 1) + i + 4, loc + 1);
Thomas Gleixner46d28942021-09-08 15:29:18 +0200271 /* Don't touch the fixup type */
Tony Luck548acf12016-02-17 10:20:12 -0800272
273 i += sizeof(uint32_t) * 3;
274 }
275
276 qsort(extab_image, image_size / 12, 12, compare_relative_table);
277
278 i = 0;
279 while (i < image_size) {
280 uint32_t *loc = (uint32_t *)(extab_image + i);
281
282 w(r(loc) - i, loc);
283 w(r(loc + 1) - (i + 4), loc + 1);
Thomas Gleixner46d28942021-09-08 15:29:18 +0200284 /* Don't touch the fixup type */
Tony Luck548acf12016-02-17 10:20:12 -0800285
286 i += sizeof(uint32_t) * 3;
287 }
288}
289
/*
 * s390 extable entries are 16 bytes: two self-relative 32-bit offsets
 * at bytes 0 and 4, and a self-relative 64-bit handler offset at byte
 * 8.  Normalize all three to table-relative, sort, then denormalize.
 */
static void s390_sort_relative_table(char *extab_image, int image_size)
{
	int i;

	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) + i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4));
		/*
		 * 0 is a special self-relative handler value, which means that
		 * handler should be ignored. It is safe, because it means that
		 * handler field points to itself, which should never happen.
		 * When creating extable-relative values, keep it as 0, since
		 * this should never occur either: it would mean that handler
		 * field points to the first extable entry.
		 */
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler += i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}

	qsort(extab_image, image_size / 16, 16, compare_relative_table);

	/* Denormalize: convert every field back to self-relative form. */
	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) - i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4));
		/* Preserve the "ignore handler" sentinel value 0 as-is. */
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler -= i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}
}
328
/*
 * Sort the tables in one mmap()ed ELF image: bind the endianness
 * accessors from EI_DATA, validate the ELF header, pick an optional
 * arch-specific exception-table sort routine from e_machine, and
 * dispatch on EI_CLASS to do_sort_32()/do_sort_64() (from sorttable.h).
 * Returns 0 on success, non-zero on any failure.
 */
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	/* Safe for either class: e_ident layout is shared with Elf64_Ehdr. */
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	/* Select big- or little-endian accessors before any multi-byte read. */
	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	/* Only current-version ET_EXEC/ET_DYN ELF files are accepted. */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	/*
	 * Architectures with non-standard extable layouts need a custom
	 * sort; those listed without one use the generic in-kernel path.
	 */
	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_X86_64:
		custom_sort = x86_sort_relative_table;
		break;
	case EM_S390:
		custom_sort = s390_sort_relative_table;
		break;
	case EM_AARCH64:
		custom_sort = arm64_sort_relative_table;
		break;
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_RISCV:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	/* Sanity-check header sizes for the class, then hand off. */
	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}
426
/*
 * Entry point: sort every vmlinux image named on the command line
 * in-place.  Exits 0 when all files succeed (or none are given),
 * 1 if any file fails.
 */
int main(int argc, char *argv[])
{
	int argn, failures = 0;
	size_t length = 0;
	void *image = NULL;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (argn = 1; argn < argc; argn++) {
		image = mmap_file(argv[argn], &length);
		if (!image) {
			failures++;
			continue;
		}

		if (do_file(argv[argn], image))
			failures++;

		munmap(image, length);
	}

	return failures ? 1 : 0;
}