/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know the symbol value should
 * remain constant even if the kernel image is relocated at run time.
 * Absolute symbols are not relocated.  If a symbol's value should change
 * when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
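
/*
 * Note: the assignments above alias the two jiffies symbols at link
 * time; since x86 is little-endian, a 32-bit read of 'jiffies' sees
 * the low half of the 64-bit 'jiffies_64' counter.
 */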

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings need different RWX permissions
 * for the pages mapping the text section and for the (freed) padding
 * pages around it, so the identity mappings are broken down into
 * smaller pages.  On 64-bit, kernel text mappings and kernel identity
 * mappings are separate, so we can enable protection checks as well as
 * retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;				\

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
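
/*
 * Note: FLAGS() takes the ELF p_flags bits directly (PF_X = 1,
 * PF_W = 2, PF_R = 4), so 5 = R_E, 6 = RW_ and 7 = RWE, matching
 * the comments above.
 */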

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif
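
	/*
	 * Note: subtracting LOAD_OFFSET converts the linked virtual
	 * address of the startup code back to its physical address;
	 * that physical address is what ENTRY() above exposes as the
	 * ELF entry point.
	 */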

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ALIGN_ENTRY_TEXT_BEGIN
		ENTRY_TEXT
		IRQENTRY_TEXT
		ALIGN_ENTRY_TEXT_END
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)

#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		__indirect_thunk_end = .;
#endif

		/* End of text section */
		_etext = .;
	} :text = 0x9090
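
	/*
	 * Note: '= 0x9090' sets the output section fill pattern; 0x90 is
	 * the x86 NOP opcode, so padding in the text segment decodes as
	 * harmless NOPs rather than arbitrary bytes.
	 */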

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

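	/*
	 * Note: each exception table entry pairs a potentially faulting
	 * instruction address with its fixup code; see
	 * search_exception_tables().
	 */
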
	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR
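
		/*
		 * For each vvar declared in asm/vvar.h, the EMIT_VVAR()
		 * definition above expands to something like (illustrative;
		 * the actual names and offsets live in asm/vvar.h):
		 *
		 *	. = __vvar_beginning_hack + 128;
		 *	*(.vvar_vsyscall_gtod_data)
		 *
		 * pinning each variable at a fixed, ABI-visible page offset.
		 */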

		/*
		 * Pad the rest of the page with zeros.  Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif
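
	/*
	 * Note: linking .data..percpu at virtual address 0 lets per-cpu
	 * variables be addressed as plain offsets from the %gs segment
	 * base, which points at each CPU's per-cpu area.
	 */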

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run.
	 * All references to such code must be patched out by alternatives,
	 * normally by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
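
	/*
	 * Note: the entries here are struct cpu_dev pointers emitted by
	 * cpu_dev_register() and walked during early CPU identification.
	 */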

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which at runtime can be
	 * patched with virtualization-friendly instructions or bare-metal
	 * native ones.  Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	/*
	 * struct alt_instr entries.  From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions.  The linker sticks them
	 * in as binary blobs.  The .altinstructions section has enough data
	 * to get their address and length so the kernel can be patched
	 * safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

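	/*
	 * For example (illustrative; old_fn/new_fn/X86_FEATURE_FOO are
	 * hypothetical):
	 *
	 *	alternative("call old_fn", "call new_fn", X86_FEATURE_FOO);
	 *
	 * emits one struct alt_instr into .altinstructions and the
	 * replacement bytes for "call new_fn" into this section.
	 */
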
	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMU entries which is sorted at runtime
	 * according to dependency order.  After rootfs_initcall completes,
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
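
	/*
	 * Note: .smp_locks holds the addresses of LOCK prefixes so the
	 * alternatives code can patch them to NOPs when only one CPU is
	 * online, and restore them when more CPUs come up.
	 */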

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
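
	/*
	 * Note: the space between __brk_base and __brk_limit is handed out
	 * by extend_brk() during early boot (early page tables and the
	 * like).  Users reserve a worst-case amount at build time with
	 * RESERVE_BRK(), which places a block in .brk_reservation, e.g.:
	 *
	 *	RESERVE_BRK(dmi_alloc, 65536);
	 */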

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section:  Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
	}
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
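
/*
 * Note: each init_per_cpu__ symbol resolves to the boot-time address of
 * the boot CPU's copy of a per-cpu variable: its zero-based per-cpu
 * offset plus the load address of the initial per-cpu area,
 * __per_cpu_load.
 */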

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif