// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's table
 *
 * Added ORC unwind tables sort support and other updates:
 * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
 * Shile Zhang <shile.zhang@linux.alibaba.com>
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recordmcount.c which is:
 *
 *	Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 *	Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>

#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT	93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA	94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64	183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE	189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2	195
#endif

#ifndef EM_RISCV
#define EM_RISCV	243
#endif

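/*
 * Read/write accessors for (possibly unaligned) 16/32/64-bit values in
 * the mapped image.  do_file() points these at the big- or little-endian
 * helpers below, matching the ELF file's EI_DATA encoding.
 */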
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
typedef void (*table_sort_t)(char *, int);

/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces.  If successful, then mmap
 * avoids copying unused pieces; else just read the whole file.
 * Open for both read and write.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		/* Callers only test for NULL, so don't hand back MAP_FAILED. */
		addr = NULL;
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;

out:
	close(fd);
	return addr;
}

static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}

/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

static inline int is_shndx_special(unsigned int i)
{
	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}

/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx != SHN_XINDEX)
		return shndx;
	return r(&symtab_shndx_start[sym_offs]);
}

/* 32 bit and 64 bit are very similar */
#include "sorttable.h"
#define SORTTABLE_64
#include "sorttable.h"
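/*
 * The double inclusion above builds the table-sorting code for both ELF
 * classes; sorttable.h is expected to provide the do_sort_32() and
 * do_sort_64() helpers that do_file() calls below.
 */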

static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	if (av < bv)
		return -1;
	if (av > bv)
		return 1;
	return 0;
}

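/*
 * Sort a table of 8-byte entries, each made up of two 32-bit
 * self-relative offsets (typically an exception table's insn/fixup
 * pair), keyed on the first offset.
 */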
static void sort_relative_table(char *extab_image, int image_size)
{
	int i = 0;

	/*
	 * Do the same thing the runtime sort does, first normalize to
	 * being relative to the start of the section.
	 */
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) + i, loc);
		i += 4;
	}

	qsort(extab_image, image_size / 8, 8, compare_relative_table);

	/* Now denormalize. */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) - i, loc);
		i += 4;
	}
}

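/*
 * Like sort_relative_table(), but for 12-byte entries whose third word
 * holds fixup type/data that must not be adjusted.
 */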
static void sort_relative_table_with_data(char *extab_image, int image_size)
{
	int i = 0;

	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) + i, loc);
		w(r(loc + 1) + i + 4, loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}

	qsort(extab_image, image_size / 12, 12, compare_relative_table);

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) - i, loc);
		w(r(loc + 1) - (i + 4), loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}
}

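/*
 * s390 exception table entries are 16 bytes: two 32-bit relative offsets
 * followed by a 64-bit relative handler field, which needs the special
 * treatment described below.
 */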
static void s390_sort_relative_table(char *extab_image, int image_size)
{
	int i;

	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) + i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4));
		/*
		 * 0 is a special self-relative handler value, which means that
		 * handler should be ignored. It is safe, because it means that
		 * handler field points to itself, which should never happen.
		 * When creating extable-relative values, keep it as 0, since
		 * this should never occur either: it would mean that handler
		 * field points to the first extable entry.
		 */
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler += i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}

	qsort(extab_image, image_size / 16, 16, compare_relative_table);

	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) - i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4));
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler -= i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}
}

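/*
 * Validate the ELF header, select the matching endianness accessors and
 * any architecture-specific table sort, then dispatch to do_sort_32() or
 * do_sort_64() according to the ELF class.
 */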
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_AARCH64:
	case EM_RISCV:
	case EM_X86_64:
		custom_sort = sort_relative_table_with_data;
		break;
	case EM_S390:
		custom_sort = s390_sort_relative_table;
		break;
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}

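/*
 * Sort each file given on the command line in place; the exit status is
 * non-zero if any of them could not be processed.
 */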
int main(int argc, char *argv[])
{
	int i, n_error = 0;	/* gcc-4.3.0 false positive complaint */
	size_t size = 0;
	void *addr = NULL;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		addr = mmap_file(argv[i], &size);
		if (!addr) {
			++n_error;
			continue;
		}

		if (do_file(argv[i], addr))
			++n_error;

		munmap(addr, size);
	}

	return !!n_error;
}