// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif
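
/*
 * __GFP_NOTRACK no longer exists in current mainline kernels; defining
 * it to 0 here presumably keeps the GFP_KERNEL | __GFP_NOTRACK |
 * __GFP_ZERO masks used by the page-table walkers below compiling both
 * upstream and in backports where the flag is still defined.
 */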

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif
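
/*
 * Rationale sketch: on 64 bit the kernel image is expected to be mapped
 * with PMD-sized large pages, so cloning at PMD level is sufficient.
 * On 32 bit the cloned regions are not necessarily PMD-aligned, and
 * PMD-level cloning could expose adjacent kernel data to user space,
 * hence the PTE-level granularity there.
 */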

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
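
/*
 * Illustrative boot command-line usage handled above (not an exhaustive
 * list of related mitigation switches):
 *
 *	pti=off		- force page-table isolation off
 *	pti=on		- force page-table isolation on, even on CPUs not
 *			  affected by Meltdown
 *	pti=auto	- let the kernel decide based on
 *			  X86_BUG_CPU_MELTDOWN (the default)
 *	nopti		- equivalent to pti=off
 *	mitigations=off	- makes cpu_mitigations_off() return true and
 *			  therefore also disables PTI
 */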

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
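
/*
 * Rough sketch of the layout assumed above (see the pgtable headers for
 * the real helpers): with PTI the PGD is allocated as an 8k pair of
 * pages, the kernel-mode PGD in the first 4k and the user-mode PGD in
 * the second, so kernel_to_user_pgdp() can be thought of as
 *
 *	user_pgdp = (pgd_t *)((unsigned long)kernel_pgdp | PAGE_SIZE);
 *
 * while pgdp_maps_userspace() checks whether the entry indexes the
 * lower, userspace half of the address space.
 */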

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}
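
/*
 * Example usage (see pti_clone_entry_text() and pti_clone_kernel_text()
 * below): the entry text is cloned at PMD granularity, while the kernel
 * image is cloned at PTI_LEVEL_KERNEL_IMAGE, i.e. PMD level on 64 bit
 * and PTE level on 32 bit:
 *
 *	pti_clone_pgtable((unsigned long)__entry_text_start,
 *			  (unsigned long)__entry_text_end, PTI_CLONE_PMD);
 */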

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs to be able to find the
		 * thread stack and needs one word of scratch space in which
		 * to spill a register.  All of this lives in the TSS, in
		 * the sp1 and sp2 slots.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX. These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}