// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's tables
 *
 * Added ORC unwind tables sort support and other updates:
 *   Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
 *   Shile Zhang <shile.zhang@linux.alibaba.com>
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recordmcount.c which is:
 *
 *	Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 *	Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>

#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT	93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA	94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64	183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE	189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2	195
#endif

#ifndef EM_RISCV
#define EM_RISCV	243
#endif

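/*
 * Endian-aware accessors for 16/32/64-bit fields, bound at runtime in
 * do_file() from the ELF header's EI_DATA byte.  table_sort_t is the
 * signature of the per-arch custom sort routines below.
 */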
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
typedef void (*table_sort_t)(char *, int);

/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces.  The mapping is MAP_SHARED,
 * so the sorted tables are written straight back to the file; there
 * is no read() fallback, a failed mmap is an error.
 * Open for both read and write.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		addr = NULL;	/* callers test for NULL, not MAP_FAILED */
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;

out:
	close(fd);
	return addr;
}

static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}

/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
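/* For example, SHN_ABS (0xfff1) maps to SPECIAL(0xfff1) == -15. */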

static inline int is_shndx_special(unsigned int i)
{
	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}

/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx != SHN_XINDEX)
		return shndx;
	return r(&symtab_shndx_start[sym_offs]);
}

/* 32 bit and 64 bit are very similar */
#include "sorttable.h"
#define SORTTABLE_64
#include "sorttable.h"
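/*
 * The double inclusion above instantiates one sort implementation per
 * ELF class: the first pass generates do_sort_32(), the second, with
 * SORTTABLE_64 defined, generates do_sort_64().  Both are dispatched
 * from do_file() below.
 */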
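/*
 * qsort() comparator for tables whose first field is a 32-bit,
 * section-relative offset.  Entries must already be normalized (see
 * the sort routines below) so that a plain signed compare orders them
 * by target address.
 */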
static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	if (av < bv)
		return -1;
	if (av > bv)
		return 1;
	return 0;
}

static void sort_relative_table(char *extab_image, int image_size)
{
	int i = 0;

	/*
	 * Do the same thing the runtime sort does: first normalize each
	 * entry to be relative to the start of the section.
	 */
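	/*
	 * E.g. a 32-bit entry word at section offset 8 holding value v
	 * (relative to the word's own address) is rewritten as v + 8,
	 * relative to the section start, so that qsort() orders entries
	 * by ascending target address.
	 */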
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) + i, loc);
		i += 4;
	}

	qsort(extab_image, image_size / 8, 8, compare_relative_table);

	/* Now denormalize. */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) - i, loc);
		i += 4;
	}
}

static void sort_relative_table_with_data(char *extab_image, int image_size)
{
	int i = 0;

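	/*
	 * Each entry here is three 32-bit words: two self-relative
	 * offsets (insn, fixup) followed by arch-specific type/data
	 * that must be carried along unmodified.
	 */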
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) + i, loc);
		w(r(loc + 1) + i + 4, loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}

	qsort(extab_image, image_size / 12, 12, compare_relative_table);

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) - i, loc);
		w(r(loc + 1) - (i + 4), loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}
}

static void s390_sort_relative_table(char *extab_image, int image_size)
{
	int i;

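	/*
	 * s390 extable entries are 16 bytes: two 32-bit self-relative
	 * offsets (insn, fixup) followed by a 64-bit self-relative
	 * handler address.
	 */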
	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) + i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4));
		/*
		 * 0 is a special self-relative handler value meaning the
		 * handler should be ignored.  Reserving it is safe: it
		 * would mean the handler field points to itself, which
		 * should never happen.  When creating extable-relative
		 * values, keep 0 as 0, since that should never occur
		 * either: it would mean the handler field points to the
		 * first extable entry.
		 */
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler += i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}

	qsort(extab_image, image_size / 16, 16, compare_relative_table);

	for (i = 0; i < image_size; i += 16) {
		char *loc = extab_image + i;
		uint64_t handler;

		w(r((uint32_t *)loc) - i, (uint32_t *)loc);
		w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4));
		handler = r8((uint64_t *)(loc + 8));
		if (handler)
			handler -= i + 8;
		w8(handler, (uint64_t *)(loc + 8));
	}
}

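/*
 * Validate the ELF header of one file, bind the endian accessors,
 * pick a per-arch custom sort (if any), and dispatch to the
 * ELF-class-specific do_sort_32()/do_sort_64().
 */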
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_AARCH64:
	case EM_RISCV:
	case EM_X86_64:
		custom_sort = sort_relative_table_with_data;
		break;
	case EM_S390:
		custom_sort = s390_sort_relative_table;
		break;
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;

		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}

int main(int argc, char *argv[])
{
	int i, n_error = 0;	/* gcc-4.3.0 false positive complaint */
	size_t size = 0;
	void *addr = NULL;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		addr = mmap_file(argv[i], &size);
		if (!addr) {
			++n_error;
			continue;
		}

		if (do_file(argv[i], addr))
			++n_error;

		munmap(addr, size);
	}

	return !!n_error;
}