/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};

/* Address space marker hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
	LDT_NR,
#endif
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
	LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)

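/*
 * Output helpers: depending on the to_dmesg flag a dump line goes either
 * to the kernel log via printk() or to the debugfs seq_file. A NULL
 * seq_file is tolerated so that the W+X checker can run without a
 * debugfs reader.
 */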
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

/*
 * On 64 bits, sign-extend the 48 bit (57 bit with 5-level paging) address
 * to 64 bit.
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;
	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have accumulated so far. A "break" is a change in
	 * permissions or level, or the crossing of an address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
			WARN_ONCE(1,
				  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
				  (void *)st->start_address,
				  (void *)st->start_address);
			st->wx_pages += (st->current_address -
					 st->start_address) / PAGE_SIZE;
		}

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}

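/*
 * The effective permissions of a range are constrained by every level of
 * the walk: _PAGE_USER and _PAGE_RW only take effect if they are set at
 * all levels, while a single _PAGE_NX anywhere is enough to make the
 * range non-executable.
 */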
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}

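/*
 * Walk the leaf (PTE) level below one PMD entry. P is the virtual address
 * covered by the first entry of this table; every entry is reported to
 * note_page() as level 5 ("pte").
 */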
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pte_t *start;
	pgprotval_t prot, eff;

	start = (pte_t *)pmd_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		prot = pte_flags(*start);
		eff = effective_prot(eff_in, prot);
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		note_page(m, st, __pgprot(prot), eff, 5);
		start++;
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_zero_page, we can call note_page()
 * right away without walking through the lower level page tables. This
 * saves us dozens of seconds (minutes for a 5-level config) while checking
 * for W+X mappings or reading the kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
	    (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) ||
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif

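/*
 * When the PMD or PUD level is folded (PTRS_PER_PMD == 1 or
 * PTRS_PER_PUD == 1), the corresponding walker collapses into a direct
 * call one level further down and the pud_*()/p4d_*() helpers are
 * remapped onto the next lower level.
 */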
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;
	pud_t *prev_pud = NULL;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 3);

		prev_pud = start;
		start++;
	}
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

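/*
 * The p4d level only exists with 5-level paging. With 4-level paging the
 * p4d is folded into the pgd, so hand the entry straight to the PUD
 * walker in that case.
 */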
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 2);

		start++;
	}
}

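/*
 * With boot-time switching of the paging mode, 5-level support is a
 * runtime question. When it is disabled, a pgd entry is really a folded
 * p4d entry, so route pgd_large()/pgd_none() through the p4d helpers.
 */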
#define pgd_large(a) (pgtable_l5_enabled ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * ffff800000000000 - ffff87ffffffffff is reserved for
	 * the hypervisor.
	 */
	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
		(idx <  pgd_index(__PAGE_OFFSET));
#else
	return false;
#endif
}

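/*
 * Core walker. If no pgd is supplied, the kernel's own top-level page
 * table (init_top_pgt or swapper_pg_dir) is dumped; if one is supplied,
 * the dmesg flag selects printk output instead of the seq_file. checkwx
 * enables the W+X audit and the summary printed at the end.
 */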
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
#ifdef CONFIG_X86_64
	pgd_t *start = (pgd_t *) &init_top_pgt;
#else
	pgd_t *start = swapper_pg_dir;
#endif
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(m, &st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(m, &st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false, true);
}

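/*
 * debugfs entry point. With page table isolation enabled, user == true
 * selects the user-space half of the PGD pair for the dump.
 */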
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && static_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

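/*
 * With page table isolation there is a second, user-space set of kernel
 * page tables; audit it for W+X mappings as well.
 */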
static void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = (pgd_t *) &init_top_pgt;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
	ptdump_walk_user_pgd_level_checkwx();
}

static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
#endif
	return 0;
}
__initcall(pt_dump_init);