// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (the physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates exploits
 * that rely on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on the PGD & P4D/PUD page table levels to increase the number of
 * possible addresses. The physical memory mapping code was adapted to
 * support P4D/PUD level virtual addresses. On the best configuration,
 * this implementation provides about 30,000 possible virtual addresses
 * on average for each memory region. An additional low memory page is
 * used to ensure each CPU can start with a PGD aligned virtual address
 * (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at the
 * available space for the regions based on different configuration options
 * and randomizes each region's base and the space between regions. The size
 * of the physical memory mapping is the available physical memory.
 */
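
/*
 * Rough sizing intuition for the "30,000 possible virtual addresses"
 * figure above (a sketch, not an exact formula; the real number depends
 * on the paging mode and config options): region bases are PUD aligned
 * (1 GB), so the number of possible bases for a region is roughly
 *
 *	(randomization range - sum of region sizes) / PUD_SIZE
 *
 * shared between the three regions. See kernel_randomize_memory() below.
 */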

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered by virtual address, and
 * this order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 0 },
	{ &vmalloc_base, 0 },
	{ &vmemmap_base, 0 },
};
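
/*
 * The size_tb fields start out as 0 and are computed at boot by
 * kernel_randomize_memory(). The base pointers start from the compile-time
 * defaults; with 4-level paging these are (illustrative values from the
 * x86_64 memory map documentation, subject to config):
 *
 *	page_offset_base	0xffff888000000000	(direct mapping)
 *	vmalloc_base		0xffffc90000000000
 *	vmemmap_base		0xffffea0000000000
 */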

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
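
/* Example: a region with size_tb == 64 occupies 64 << 40 bytes, i.e. 64 TB. */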

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;
	unsigned long vmemmap_size;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
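
	/*
	 * Worked example (the actual values depend on the kernel config):
	 * with 4-level paging, MAX_PHYSMEM_BITS is 46, so the physical
	 * mapping region gets 1 << (46 - 40) = 64 TB and VMALLOC_SIZE_TB
	 * is 32; with 5-level paging (52 bits) the values are 4096 TB and
	 * 12800 TB respectively.
	 */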

	/*
	 * Update the size of the physical memory mapping to match the
	 * available memory and add padding if needed (especially for
	 * memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
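
	/*
	 * Example with illustrative numbers: on a machine with 16 GB of
	 * RAM and the default CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING
	 * of 10, memory_tb = DIV_ROUND_UP(16 GB, 1 TB) + 10 = 11 TB.
	 */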

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/*
	 * Calculate the vmemmap region size in TBs, aligned to a TB
	 * boundary.
	 */
	vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
			sizeof(struct page);
	kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
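
	/*
	 * Example, assuming sizeof(struct page) == 64: an 11 TB physical
	 * region covers 11 << (40 - 12) 4 KB pages, each needing 64 bytes
	 * of struct page, i.e. 11 TB / 64 = 176 GB of vmemmap, which
	 * rounds up to a 1 TB region.
	 */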

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
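
	/*
	 * remain_entropy is now whatever is left of the randomization
	 * range [vaddr_start, vaddr_end) after reserving the nominal size
	 * of each region: the slack that is handed out below as random
	 * gaps between the regions.
	 */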

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
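
	/*
	 * kaslr_get_random_long() (arch/x86/lib/kaslr.c) mixes the boot
	 * entropy sources available on the CPU (RDRAND, RDTSC, i8254);
	 * the exact sources depend on the kernel version and hardware.
	 */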

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;
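
		/*
		 * The slack is split evenly between the regions still to
		 * be placed, and the random gap is rounded down to a PUD
		 * boundary (1 GB) to keep the base PUD aligned. Example
		 * with illustrative numbers: with 18 TB of slack left and
		 * three regions to go, the gap inserted before this region
		 * is a PUD aligned value in [0, 6 TB].
		 */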

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment: since vaddr is PUD aligned
		 * here, rounding up past the region end leaves a guard gap
		 * of at least PUD_SIZE before the next region.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}

void __meminit init_trampoline_kaslr(void)
{
	pud_t *pud_page_tramp, *pud, *pud_tramp;
	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
	unsigned long paddr, vaddr;
	pgd_t *pgd;

	pud_page_tramp = alloc_low_page();

	/*
	 * There are two mappings for the low 1MB area, the direct mapping
	 * and the 1:1 mapping for the real mode trampoline:
	 *
	 * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
	 * 1:1 mapping:    virt_addr = phys_addr
	 */
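
	/*
	 * Without KASLR, the trampoline could reuse the kernel's own
	 * direct-mapping PGD entry, since virt == phys then holds at the
	 * same offsets within that entry. With KASLR, page_offset_base is
	 * only PUD aligned, not PGD aligned, so a dedicated low page is
	 * built instead: copying the direct-mapping PUD entry for physical
	 * address 0 into slot 0 of pud_page_tramp makes virtual address 0
	 * resolve to physical address 0 through trampoline_pgd_entry,
	 * which the realmode setup code installs into the trampoline PGD.
	 */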
	paddr = 0;
	vaddr = (unsigned long)__va(paddr);
	pgd = pgd_offset_k(vaddr);

	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	pud_tramp = pud_page_tramp + pud_index(paddr);
	*pud_tramp = *pud;
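
	/*
	 * A single copied PUD entry is enough: KASLR keeps
	 * page_offset_base PUD aligned, so the direct-mapping PUD entry
	 * covering __va(0) maps exactly physical [0, PUD_SIZE), far more
	 * than the sub-1MB range the real mode trampoline runs in.
	 */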

	if (pgtable_l5_enabled()) {
		p4d_page_tramp = alloc_low_page();

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);

		set_p4d(p4d_tramp,
			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

		set_pgd(&trampoline_pgd_entry,
			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
	} else {
		set_pgd(&trampoline_pgd_entry,
			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
	}
}