// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates exploits
 * relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on the PGD & P4D/PUD page table levels to increase the number of
 * possible addresses. The physical memory mapping code was adapted to
 * support P4D/PUD level virtual addresses. In the best configuration, this
 * implementation provides about 30,000 possible virtual addresses on
 * average for each memory region. An additional low memory page is used to
 * ensure each CPU can start with a PGD aligned virtual address (for
 * realmode).
 *
 * The order of the memory regions is not changed. The feature looks at the
 * available space for the regions based on different configuration options
 * and randomizes the base and the space between each region. The size of
 * the physical memory mapping region is the available physical memory.
 */
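
/*
 * A sketch of the resulting layout with 4-level paging (the exact
 * addresses vary per boot): the physical mapping, vmalloc and vmemmap
 * regions are laid out in that order, each starting at a random,
 * PUD-aligned offset after the previous one, with the whole window
 * bounded below by __PAGE_OFFSET_BASE_L4 and above by
 * CPU_ENTRY_AREA_BASE.
 */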

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to maximize
 * the space available for randomization, but that is too hard to keep
 * straight and has already caused issues.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except the module area, which is
 * randomized by separate logic earlier during boot). The list is ordered
 * by virtual address, and this order is preserved after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 0 },
	{ &vmalloc_base, 0 },
	{ &vmemmap_base, 1 },
};
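
/*
 * The zero size_tb values above are placeholders: kernel_randomize_memory()
 * computes the physical mapping and vmalloc sizes at boot (a sketch,
 * assuming x86_64 defaults: VMALLOC_SIZE_TB is 32 with 4-level paging).
 * Only vmemmap is statically sized here, at 1 TB.
 */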

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
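
/*
 * For example, for the vmemmap entry above (size_tb == 1), get_padding()
 * returns 1UL << 40, i.e. 1 TB.
 */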

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;

	/*
	 * Size the physical memory mapping region to the available physical
	 * memory and add padding if needed (especially for memory hotplug
	 * support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;
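
	/*
	 * Worked example (illustrative values, not fixed ones): with 8 GB
	 * of RAM and CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING == 10,
	 * memory_tb is DIV_ROUND_UP(8 GB, 1 TB) + 10 == 11, so the physical
	 * mapping region shrinks to 11 TB and the freed virtual space goes
	 * back into the entropy pool computed below.
	 */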

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
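
	/*
	 * remain_entropy is now the slack left in the [vaddr_start,
	 * vaddr_end) window after all region sizes are accounted for; the
	 * loop below gives each region an equal share of whatever slack is
	 * still unused.
	 */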

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

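	/*
	 * A sketch of one iteration, assuming 4-level paging (PUD_SIZE ==
	 * 1 GB): with R bytes of slack and three regions left, region 0
	 * consumes a random amount up to R/3, rounded down to a 1 GB (PUD)
	 * boundary; the unconsumed slack stays in remain_entropy and is
	 * redistributed among the remaining regions.
	 */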
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		if (pgtable_l5_enabled())
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		if (pgtable_l5_enabled())
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}
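/*
 * Build the trampoline PUD page used with 4-level paging: copy the
 * kernel's PUD entries covering the first PGD-sized chunk of the direct
 * mapping into a low page, so the real-mode trampoline can reach low
 * physical memory despite the randomized mapping base.
 */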
static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
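/*
 * Same as init_trampoline_pud() above, but for 5-level paging: the
 * trampoline PGD entry points at a P4D page instead of a PUD page.
 */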
static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (pgtable_l5_enabled())
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}