// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *      https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux
 * kernel by:
 *
 * Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 * Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 * Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 * Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK   0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PTE
#endif

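/*
 * Log the reason for the chosen PTI state, but only on CPUs that are
 * actually affected by Meltdown, where running without PTI is insecure.
 */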
static void __init pti_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                pr_info("%s\n", reason);
}

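/*
 * Log the reason only on CPUs that are not affected by Meltdown and
 * would have been secure without PTI force-enabled.
 */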
static void __init pti_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                pr_info("%s\n", reason);
}

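/*
 * PTI operating mode selected at boot: automatic selection based on
 * X86_BUG_CPU_MELTDOWN, or explicitly forced off/on via the command line.
 */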
static enum pti_mode {
        PTI_AUTO = 0,
        PTI_FORCE_OFF,
        PTI_FORCE_ON
} pti_mode;

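/*
 * Evaluate the "pti=" and "nopti" command line options and the CPU bug
 * state, then force the X86_FEATURE_PTI capability accordingly.
 */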
void __init pti_check_boottime_disable(void)
{
        char arg[5];
        int ret;

        /* Assume mode is auto unless overridden. */
        pti_mode = PTI_AUTO;

        if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on XEN PV.");
                return;
        }

        ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
        if (ret > 0)  {
                if (ret == 3 && !strncmp(arg, "off", 3)) {
                        pti_mode = PTI_FORCE_OFF;
                        pti_print_if_insecure("disabled on command line.");
                        return;
                }
                if (ret == 2 && !strncmp(arg, "on", 2)) {
                        pti_mode = PTI_FORCE_ON;
                        pti_print_if_secure("force enabled on command line.");
                        goto enable;
                }
                if (ret == 4 && !strncmp(arg, "auto", 4)) {
                        pti_mode = PTI_AUTO;
                        goto autosel;
                }
        }

        if (cmdline_find_option_bool(boot_command_line, "nopti") ||
            cpu_mitigations_off()) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on command line.");
                return;
        }

autosel:
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return;
enable:
        setup_force_cpu_cap(X86_FEATURE_PTI);
}

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
        /*
         * Changes to the high (kernel) portion of the kernelmode page
         * tables are not automatically propagated to the usermode tables.
         *
         * Users should keep in mind that, unlike the kernelmode tables,
         * there is no vmalloc_fault equivalent for the usermode tables.
         * Top-level entries added to init_mm's usermode pgd after boot
         * will not be automatically propagated to other mms.
         */
        if (!pgdp_maps_userspace(pgdp))
                return pgd;

        /*
         * The user page tables get the full PGD, accessible from
         * userspace:
         */
        kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

        /*
         * If this is normal user memory, make it NX in the kernel
         * pagetables so that, if we somehow screw up and return to
         * usermode with the kernel CR3 loaded, we'll get a page fault
         * instead of allowing user code to execute with the wrong CR3.
         *
         * As exceptions, we don't set NX if:
         *  - _PAGE_USER is not set. This could be an executable
         *    EFI runtime mapping or something similar, and the kernel
         *    may execute from it
         *  - we don't have NX support
         *  - we're clearing the PGD (i.e. the new pgd is not present).
         */
        if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
            (__supported_pte_mask & _PAGE_NX))
                pgd.pgd |= _PAGE_NX;

        /* return the copy of the PGD we want the kernel to use: */
        return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
        pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

        if (address < PAGE_OFFSET) {
                WARN_ONCE(1, "attempt to walk user address\n");
                return NULL;
        }

        if (pgd_none(*pgd)) {
                unsigned long new_p4d_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_p4d_page))
                        return NULL;

                set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
        }
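        /* x86 has no huge mappings at PGD level; enforce that at build time. */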
        BUILD_BUG_ON(pgd_large(*pgd) != 0);

        return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        p4d_t *p4d;
        pud_t *pud;

        p4d = pti_user_pagetable_walk_p4d(address);
        if (!p4d)
                return NULL;

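        /* As at the PGD level, huge p4d entries do not exist on x86. */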
        BUILD_BUG_ON(p4d_large(*p4d) != 0);
        if (p4d_none(*p4d)) {
                unsigned long new_pud_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_pud_page))
                        return NULL;

                set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
        }

        pud = pud_offset(p4d, address);
        /* The user page tables do not use large mappings: */
        if (pud_large(*pud)) {
                WARN_ON(1);
                return NULL;
        }
        if (pud_none(*pud)) {
                unsigned long new_pmd_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_pmd_page))
                        return NULL;

                set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
        }

        return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pmd_t *pmd;
        pte_t *pte;

        pmd = pti_user_pagetable_walk_pmd(address);
        if (!pmd)
                return NULL;

        /* We can't do anything sensible if we hit a large mapping. */
        if (pmd_large(*pmd)) {
                WARN_ON(1);
                return NULL;
        }

        if (pmd_none(*pmd)) {
                unsigned long new_pte_page = __get_free_page(gfp);
                if (!new_pte_page)
                        return NULL;

                set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
        }

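        /*
         * A PTE with _PAGE_USER set would mean the walk reached an existing
         * userspace mapping instead of a kernel one; refuse to touch it.
         */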
        pte = pte_offset_kernel(pmd, address);
        if (pte_flags(*pte) & _PAGE_USER) {
                WARN_ONCE(1, "attempt to walk to user pte\n");
                return NULL;
        }
        return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
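/*
 * The vsyscall page must stay reachable from user mode, so copy its
 * kernel PTE into the user page tables and mark the paging hierarchy
 * leading to it with the required user bits.
 */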
static void __init pti_setup_vsyscall(void)
{
        pte_t *pte, *target_pte;
        unsigned int level;

        pte = lookup_address(VSYSCALL_ADDR, &level);
        if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
                return;

        target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
        if (WARN_ON(!target_pte))
                return;

        *target_pte = *pte;
        set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

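/* The page-table level down to which pti_clone_pgtable() clones entries. */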
enum pti_clone_level {
        PTI_CLONE_PMD,
        PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
                  enum pti_clone_level level)
{
        unsigned long addr;

        /*
         * Clone the populated PMDs which cover start to end. These PMD areas
         * can have holes.
         */
        for (addr = start; addr < end;) {
                pte_t *pte, *target_pte;
                pmd_t *pmd, *target_pmd;
                pgd_t *pgd;
                p4d_t *p4d;
                pud_t *pud;

                /* Overflow check */
                if (addr < start)
                        break;

                pgd = pgd_offset_k(addr);
                if (WARN_ON(pgd_none(*pgd)))
                        return;
                p4d = p4d_offset(pgd, addr);
                if (WARN_ON(p4d_none(*p4d)))
                        return;

                pud = pud_offset(p4d, addr);
                if (pud_none(*pud)) {
                        WARN_ON_ONCE(addr & ~PUD_MASK);
                        addr = round_up(addr + 1, PUD_SIZE);
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        WARN_ON_ONCE(addr & ~PMD_MASK);
                        addr = round_up(addr + 1, PMD_SIZE);
                        continue;
                }

                if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
                        target_pmd = pti_user_pagetable_walk_pmd(addr);
                        if (WARN_ON(!target_pmd))
                                return;

                        /*
                         * Only clone present PMDs.  This ensures only setting
                         * _PAGE_GLOBAL on present PMDs.  This should only be
                         * called on well-known addresses anyway, so a non-
                         * present PMD would be a surprise.
                         */
                        if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
                                return;

                        /*
                         * Setting 'target_pmd' below creates a mapping in both
                         * the user and kernel page tables.  It is effectively
                         * global, so set it as global in both copies.  Note:
                         * the X86_FEATURE_PGE check is not _required_ because
                         * the CPU ignores _PAGE_GLOBAL when PGE is not
                         * supported.  The check keeps consistency with
                         * code that only sets this bit when supported.
                         */
                        if (boot_cpu_has(X86_FEATURE_PGE))
                                *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

                        /*
                         * Copy the PMD.  That is, the kernelmode and usermode
                         * tables will share the last-level page tables of this
                         * address range.
                         */
                        *target_pmd = *pmd;

                        addr += PMD_SIZE;

                } else if (level == PTI_CLONE_PTE) {

                        /* Walk the page-table down to the pte level */
                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte)) {
                                addr += PAGE_SIZE;
                                continue;
                        }

                        /* Only clone present PTEs */
                        if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
                                return;

                        /* Allocate PTE in the user page-table */
                        target_pte = pti_user_pagetable_walk_pte(addr);
                        if (WARN_ON(!target_pte))
                                return;

                        /* Set GLOBAL bit in both PTEs */
                        if (boot_cpu_has(X86_FEATURE_PGE))
                                *pte = pte_set_flags(*pte, _PAGE_GLOBAL);

                        /* Clone the PTE */
                        *target_pte = *pte;

                        addr += PAGE_SIZE;

                } else {
                        BUG();
                }
        }
}

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
        p4d_t *kernel_p4d, *user_p4d;
        pgd_t *kernel_pgd;

        user_p4d = pti_user_pagetable_walk_p4d(addr);
        if (!user_p4d)
                return;

        kernel_pgd = pgd_offset_k(addr);
        kernel_p4d = p4d_offset(kernel_pgd, addr);
        *user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
        unsigned int cpu;

        pti_clone_p4d(CPU_ENTRY_AREA_BASE);

        for_each_possible_cpu(cpu) {
                /*
                 * The SYSCALL64 entry code needs to be able to find the
                 * thread stack and needs one word of scratch space in which
                 * to spill a register.  All of this lives in the TSS, in
                 * the sp1 and sp2 slots.
                 *
                 * This is done for all possible CPUs during boot to ensure
                 * that it's propagated to all mms.
                 */

                unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
                phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
                pte_t *target_pte;

                target_pte = pti_user_pagetable_walk_pte(va);
                if (WARN_ON(!target_pte))
                        return;

                *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
        }
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
        unsigned long start, end;

        start = CPU_ENTRY_AREA_BASE;
        end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

        pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
        pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
        pti_clone_pgtable((unsigned long) __entry_text_start,
                          (unsigned long) __entry_text_end,
                          PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
        /*
         * Systems with PCIDs get little benefit from global
         * kernel text and are not worth the downsides.
         */
        if (cpu_feature_enabled(X86_FEATURE_PCID))
                return false;

        /*
         * Only do global kernel image for pti=auto.  Do the most
         * secure thing (not global) if pti=on specified.
         */
        if (pti_mode != PTI_AUTO)
                return false;

        /*
         * K8 may not tolerate the cleared _PAGE_RW on the userspace
         * global kernel image pages.  Do the safe thing (disable
         * global kernel image).  This is unlikely to ever be
         * noticed because PTI is disabled by default on AMD CPUs.
         */
        if (boot_cpu_has(X86_FEATURE_K8))
                return false;

        /*
         * RANDSTRUCT derives its hardening benefits from the
         * attacker's lack of knowledge about the layout of kernel
         * data structures.  Keep the kernel image non-global in
         * cases where RANDSTRUCT is in use to help keep the layout a
         * secret.
         */
        if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
                return false;

        return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
        /*
         * rodata is part of the kernel image and is normally
         * readable on the filesystem or on the web.  But, do not
         * clone the areas past rodata, they might contain secrets.
         */
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
        unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

        if (!pti_kernel_image_global_ok())
                return;

        pr_debug("mapping partial kernel image into user address space\n");

        /*
         * Note that this will undo _some_ of the work that
         * pti_set_kernel_image_nonglobal() did to clear the
         * global bit.
         */
        pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

        /*
         * pti_clone_pgtable() will set the global bit in any PMDs
         * that it clones, but we also need to get any PTEs in
         * the last level for areas that are not huge-page-aligned.
         */

        /* Set the global bit for normal non-__init kernel text: */
        set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
        /*
         * The identity map is created with PMDs, regardless of the
         * actual length of the kernel.  We need to clear
         * _PAGE_GLOBAL up to a PMD boundary, not just to the end
         * of the image.
         */
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

        /*
         * This clears _PAGE_GLOBAL from the entire kernel image.
         * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
         * areas that are mapped to userspace.
         */
        set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("enabled\n");

#ifdef CONFIG_X86_32
        /*
         * We check for X86_FEATURE_PCID here. But the init-code will
         * clear the feature flag on 32 bit because the feature is not
         * supported on 32 bit anyway. To print the warning we need to
         * check with cpuid directly again.
         */
        if (cpuid_ecx(0x1) & BIT(17)) {
                /* Use printk to work around pr_fmt() */
                printk(KERN_WARNING "\n");
                printk(KERN_WARNING "************************************************************\n");
                printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
                printk(KERN_WARNING "**                                                        **\n");
                printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
                printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
                printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
                printk(KERN_WARNING "**                                                        **\n");
                printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
                printk(KERN_WARNING "************************************************************\n");
        }
#endif

        pti_clone_user_shared();

        /* Undo all global bits from the init pagetables in head_64.S: */
        pti_set_kernel_image_nonglobal();
        /* Replace some of the global bits just for shared entry text: */
        pti_clone_entry_text();
        pti_setup_espfix64();
        pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
        if (!boot_cpu_has(X86_FEATURE_PTI))
                return;
        /*
         * We need to clone everything (again) that maps parts of the
         * kernel image.
         */
        pti_clone_entry_text();
        pti_clone_kernel_text();

        debug_checkwx_user();
}