// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include <asm/kasan.h>
#include "compressed/decompressor.h"
#include "boot.h"

#define PRNG_MODE_TDES	 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG	 3

struct prno_parm {
	u32 res;
	u32 reseed_counter;
	u64 stream_bytes;
	u8 V[112];
	u8 C[112];
};

struct prng_parm {
	u8 parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};

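/*
 * Query the CPACF facilities and pick the strongest available random number
 * generator: TRNG if present, otherwise the SHA-512 based DRNG, otherwise the
 * TDES based PRNG. Returns 0 (KASLR gets disabled) if even the basic KMC-PRNG
 * function is not available.
 */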
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
		return PRNG_MODE_SHA512;
	return PRNG_MODE_TDES;
}

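/*
 * Generate a random value in the range [0, limit) with the facility selected
 * by check_prng(), using the TOD clock as seed where one is needed. Returns 0
 * on success and stores the result in *value, or -1 if no random number
 * generator is available.
 */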
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		return -1;
	}
	*value = random % limit;
	return 0;
}

/*
 * To randomize the kernel base address we have to take several facts into
 * account:
 * 1. Physical online memory might not be contiguous and may contain holes.
 *    The mem_detect info contains the list of online memory ranges we should
 *    consider.
 * 2. Several memory regions are already occupied and must not be overwritten.
 *    Currently safe_addr tells us the border below which all those occupied
 *    regions lie; anything above safe_addr is safe to use.
 * 3. An upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limits are:
 *    3.1. the limit set by the "mem=" kernel command line option,
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. The kernel base address must be aligned to THREAD_SIZE (the kernel stack
 *    size), which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE
 *    is 4 pages (16 pages when the kernel is built with kasan enabled).
 * Assumptions:
 * 1. The kernel size (including .bss size) and the upper memory limit are
 *    page aligned.
 * 2. mem_detect memory region starts are THREAD_SIZE aligned and ends are
 *    PAGE_SIZE aligned (in practice the memory configuration granularity on
 *    z/VM and LPAR is 1 MB).
 *
 * To guarantee a uniform distribution of the kernel base address among all
 * suitable addresses we generate a random value just once. For that we need
 * to build a contiguous range in which every value is suitable. We can build
 * this range by simply counting all suitable addresses (let's call them
 * positions) which would be valid as kernel base address. To count positions
 * we iterate over the online memory ranges. For each range which is big
 * enough for the kernel image we count all suitable addresses we can put the
 * kernel image at, that is
 *	(end - start - kernel_size) / THREAD_SIZE + 1
 * The two functions count_valid_kernel_positions and position_to_address
 * help to count the positions in a given memory range and to convert a
 * position back to an address.
 */
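/*
 * Worked example (made-up numbers): with kernel_size = 0x1000000,
 * THREAD_SIZE = 0x4000 and a single online range [0x2000000, 0x4000000),
 * the suitable base addresses are 0x2000000, 0x2004000, ..., 0x3000000,
 * i.e. (0x4000000 - 0x2000000 - 0x1000000) / 0x4000 + 1 = 1025 positions.
 */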
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
						  unsigned long _min,
						  unsigned long _max)
{
	unsigned long start, end, pos = 0;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return pos;
}

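/*
 * Convert a 1-based position (as counted by count_valid_kernel_positions())
 * back to the corresponding kernel base address by walking the same online
 * memory ranges. Returns 0 if the position is out of range.
 */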
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
					 unsigned long _min, unsigned long _max)
{
	unsigned long start, end;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
			return start + (pos - 1) * THREAD_SIZE;
		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return 0;
}

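/*
 * Pick a random, THREAD_SIZE aligned kernel base address between safe_addr
 * and the effective memory limit. Returns 0 if KASLR cannot be applied
 * (no random number generator available, or not enough usable memory).
 */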
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = get_mem_detect_end();
	unsigned long base_pos, max_pos, kernel_size;

	memory_limit = min(memory_limit, ident_map_size);

	/*
	 * Avoid putting the kernel at the end of physical memory, which kasan
	 * will use for shadow memory and early pgtable mapping allocations.
	 */
	memory_limit -= kasan_estimate_memory_needs(memory_limit);

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	if (safe_addr + kernel_size > memory_limit)
		return 0;

	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
	if (!max_pos) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	/* we need a value in the range [1, max_pos] inclusive */
	if (get_random(max_pos, &base_pos))
		return 0;
	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
}