blob: a0b8ffc927bb96cfd4cd264f8835bc85a59600c6 [file] [log] [blame]
Thomas Gleixnerb886d83c2019-06-01 10:08:55 +02001// SPDX-License-Identifier: GPL-2.0-only
Arjan van de Ven926e5392008-04-17 17:40:45 +02002/*
3 * Debug helper to dump the current kernel pagetables of the system
4 * so that we can see what the various memory ranges are set to.
5 *
6 * (C) Copyright 2008 Intel Corporation
7 *
8 * Author: Arjan van de Ven <arjan@linux.intel.com>
Arjan van de Ven926e5392008-04-17 17:40:45 +02009 */
10
H. Peter Anvinfe770bf02008-04-17 17:40:45 +020011#include <linux/debugfs.h>
Andrey Ryabinin04b67022017-07-24 18:25:58 +030012#include <linux/kasan.h>
H. Peter Anvinfe770bf02008-04-17 17:40:45 +020013#include <linux/mm.h>
Paul Gortmaker84e629b2016-07-13 20:18:54 -040014#include <linux/init.h>
Andrey Ryabinin146fbb762017-02-10 12:54:05 +030015#include <linux/sched.h>
Arjan van de Ven926e5392008-04-17 17:40:45 +020016#include <linux/seq_file.h>
Joerg Roedeld6ef1f12018-04-17 15:27:16 +020017#include <linux/highmem.h>
Thomas Gleixnerc200dac2018-10-08 21:53:48 +020018#include <linux/pci.h>
Steven Price2ae27132020-02-03 17:36:24 -080019#include <linux/ptdump.h>
Arjan van de Ven926e5392008-04-17 17:40:45 +020020
Thomas Gleixnerc200dac2018-10-08 21:53:48 +020021#include <asm/e820/types.h>
Arjan van de Ven926e5392008-04-17 17:40:45 +020022#include <asm/pgtable.h>
23
24/*
25 * The dumper groups pagetable entries of the same type into one, and for
26 * that it needs to keep some state when walking, and flush this state
27 * when a "break" in the continuity is found.
28 */
/*
 * Per-walk state of the dumper.  One instance lives on the stack of
 * ptdump_walk_pgd_level_core() and is recovered in the generic ptdump
 * callback via container_of() on the embedded ptdump member.
 */
struct pg_state {
	struct ptdump_state ptdump;	/* callback + range info for the generic walker */
	int level;			/* page-table level of the current run; 0 = no run open yet */
	pgprotval_t current_prot;	/* protection bits of the entry that opened the run */
	pgprotval_t effective_prot;	/* combined bits of all levels (see effective_prot()) */
	pgprotval_t prot_levels[5];	/* effective bits per level, indexed by (level - 1) */
	unsigned long start_address;	/* virtual address where the current run began */
	const struct addr_marker *marker; /* current address-space marker region */
	unsigned long lines;		/* lines emitted for the current marker region */
	bool to_dmesg;			/* route output to printk instead of the seq_file */
	bool check_wx;			/* report writable+executable mappings */
	unsigned long wx_pages;		/* number of W+X pages accumulated by note_wx() */
	struct seq_file *seq;		/* output file; may be NULL (checkwx runs pass NULL) */
};
43
/* A named boundary in the dumped virtual address space. */
struct addr_marker {
	unsigned long start_address;	/* first virtual address of the region */
	const char *name;		/* label printed as "---[ name ]---" */
	unsigned long max_lines;	/* if non-zero, skip output beyond this many lines */
};
49
Thomas Gleixner146122e2017-12-20 18:07:42 +010050/* Address space markers hints */
51
52#ifdef CONFIG_X86_64
53
/* Indices into address_markers[] for the 64-bit address-space layout. */
enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,	/* sentinel: start_address = -1, name = NULL */
};
80
/*
 * 64-bit marker table.  Entries initialized with 0UL are not
 * compile-time constants; their real start addresses are assigned
 * in pt_dump_init().
 */
static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	/* ESPfix output is truncated after 16 lines (max_lines) */
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};
111
Joerg Roedel4e8537e2018-07-18 11:41:08 +0200112#define INIT_PGD ((pgd_t *) &init_top_pgt)
113
Thomas Gleixner146122e2017-12-20 18:07:42 +0100114#else /* CONFIG_X86_64 */
115
/* Indices into address_markers[] for the 32-bit address-space layout. */
enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,	/* sentinel: start_address = -1, name = NULL */
};
131
/*
 * 32-bit marker table.  Entries initialized with 0UL are not
 * compile-time constants; they are assigned in pt_dump_init().
 */
static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};
147
Joerg Roedel4e8537e2018-07-18 11:41:08 +0200148#define INIT_PGD (swapper_pg_dir)
149
Thomas Gleixner146122e2017-12-20 18:07:42 +0100150#endif /* !CONFIG_X86_64 */
151
Arjan van de Ven926e5392008-04-17 17:40:45 +0200152/* Multipliers for offsets within the PTEs */
H. Peter Anvinfe770bf02008-04-17 17:40:45 +0200153#define PTE_LEVEL_MULT (PAGE_SIZE)
154#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
155#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
Kirill A. Shutemovfdd3d8c2017-03-28 13:48:06 +0300156#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
Juergen Gross84bbabc2017-04-12 16:36:34 +0200157#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
Arjan van de Ven926e5392008-04-17 17:40:45 +0200158
/*
 * Dual-sink printf helpers: route output to dmesg via printk() when
 * to_dmesg is set, otherwise to the seq_file @m.  @m may be NULL (the
 * W+X check passes a NULL seq_file), in which case nothing is printed.
 */
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

/* Same as above, but continues the current dmesg line (KERN_CONT). */
#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
176
Arjan van de Ven926e5392008-04-17 17:40:45 +0200177/*
178 * Print a readable form of a pgprot_t to the seq_file
179 */
/*
 * Print a readable form of a pgprot_t to the seq_file (or dmesg).
 * Each attribute occupies a fixed-width column so consecutive lines
 * align; @level indexes level_name[] (0 = cr3 ... 5 = pte).
 */
static void printk_prot(struct seq_file *m, pgprotval_t pr, int level, bool dmsg)
{
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		/* PAT lives in bit 7 at the leaf (pte) but in a large-page bit above */
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
227
Steven Price2ae27132020-02-03 17:36:24 -0800228static void note_wx(struct pg_state *st, unsigned long addr)
Thomas Gleixnerc200dac2018-10-08 21:53:48 +0200229{
230 unsigned long npages;
231
Steven Price2ae27132020-02-03 17:36:24 -0800232 npages = (addr - st->start_address) / PAGE_SIZE;
Thomas Gleixnerc200dac2018-10-08 21:53:48 +0200233
234#ifdef CONFIG_PCI_BIOS
235 /*
236 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
237 * Inform about it, but avoid the warning.
238 */
239 if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
Steven Price2ae27132020-02-03 17:36:24 -0800240 addr <= PAGE_OFFSET + BIOS_END) {
Thomas Gleixnerc200dac2018-10-08 21:53:48 +0200241 pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
242 return;
243 }
244#endif
245 /* Account the WX pages */
246 st->wx_pages += npages;
Thomas Gleixner510bb962019-04-15 10:46:07 +0200247 WARN_ONCE(__supported_pte_mask & _PAGE_NX,
248 "x86/mm: Found insecure W+X mapping at address %pS\n",
Thomas Gleixnerc200dac2018-10-08 21:53:48 +0200249 (void *)st->start_address);
250}
251
Steven Price2ae27132020-02-03 17:36:24 -0800252static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
253{
254 return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
255 ((prot1 | prot2) & _PAGE_NX);
256}
257
Arjan van de Ven926e5392008-04-17 17:40:45 +0200258/*
259 * This function gets called on a break in a continuous series
260 * of PTE entries; the next one is different so we need to
261 * print what we collected so far.
262 */
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 *
 * @pt_st: generic walker state, embedded in our struct pg_state
 * @addr:  virtual address of the entry that broke the run
 * @level: page-table level of that entry (0 = cr3 ... 5 = pte)
 * @val:   raw entry value; flags are extracted with PTE_FLAGS_MASK
 */
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
		      unsigned long val)
{
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
	pgprotval_t new_prot, new_eff;
	pgprotval_t cur, eff;
	static const char units[] = "BKMGTPE";
	struct seq_file *m = st->seq;

	new_prot = val & PTE_FLAGS_MASK;

	/* Fold in the effective protections of the parent level (if any). */
	if (level > 1) {
		new_eff = effective_prot(st->prot_levels[level - 2],
					 new_prot);
	} else {
		new_eff = new_prot;
	}

	/* Remember this level's effective bits for our children to combine. */
	if (level > 0)
		st->prot_levels[level - 1] = new_eff;

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	cur = st->current_prot;
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (new_prot != cur || new_eff != eff || level != st->level ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		/* Effective RW without NX on the finished run: flag it. */
		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st, addr);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, addr);

			/* Scale the size to the largest unit it divides by 1024. */
			delta = addr - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (addr >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		/* Start a new run at this entry. */
		st->start_address = addr;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}
359
/*
 * Core walk: dump the page tables rooted at @pgd.
 *
 * @m:       seq_file sink; may be NULL (checkwx-only runs)
 * @pgd:     page-global directory to walk
 * @checkwx: also account and report W+X mappings
 * @dmesg:   route output to printk instead of @m
 */
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	const struct ptdump_range ptdump_ranges[] = {
#ifdef CONFIG_X86_64

/* Sign-extend a 64-bit address into canonical form. */
#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \
			   normalize_addr_shift)

	/* Walk the lower half, then the (canonicalized) upper half. */
	{0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
	{normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
#else
	{0, ~0UL},
#endif
	{0, 0}
};

	struct pg_state st = {
		.ptdump = {
			.note_page	= note_page,
			.range		= ptdump_ranges
		},
		.to_dmesg	= dmesg,
		.check_wx	= checkwx,
		.seq		= m
	};

	/* The generic walker wants an mm_struct; fake one around @pgd. */
	struct mm_struct fake_mm = {
		.pgd = pgd
	};
	init_rwsem(&fake_mm.mmap_sem);

	ptdump_walk_pgd(&st.ptdump, &fake_mm);

	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
403
Steven Pricee4552482020-02-03 17:36:11 -0800404void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
Stephen Smalleye1a58322015-10-05 12:55:20 -0400405{
Steven Pricee4552482020-02-03 17:36:11 -0800406 ptdump_walk_pgd_level_core(m, mm->pgd, false, true);
Stephen Smalleye1a58322015-10-05 12:55:20 -0400407}
Thomas Gleixnerb4bf4f92017-12-04 15:08:05 +0100408
Steven Pricec5cfae12020-02-03 17:36:16 -0800409void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
410 bool user)
Thomas Gleixnerb4bf4f92017-12-04 15:08:05 +0100411{
Steven Pricec5cfae12020-02-03 17:36:16 -0800412 pgd_t *pgd = mm->pgd;
Thomas Gleixnera4b51ef2017-12-04 15:08:06 +0100413#ifdef CONFIG_PAGE_TABLE_ISOLATION
Borislav Petkov28e3ace2019-03-29 20:00:38 +0100414 if (user && boot_cpu_has(X86_FEATURE_PTI))
Thomas Gleixnera4b51ef2017-12-04 15:08:06 +0100415 pgd = kernel_to_user_pgdp(pgd);
416#endif
Thomas Gleixnerb4bf4f92017-12-04 15:08:05 +0100417 ptdump_walk_pgd_level_core(m, pgd, false, false);
418}
419EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
420
/*
 * W+X-check the user-space page tables of the initial PGD.  Only
 * meaningful when the CPU supports NX and PTI is active; otherwise
 * this is a no-op.
 */
void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *user_pgd;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	user_pgd = kernel_to_user_pgdp(INIT_PGD);
	ptdump_walk_pgd_level_core(NULL, user_pgd, true, false);
#endif
}
Stephen Smalleye1a58322015-10-05 12:55:20 -0400435
436void ptdump_walk_pgd_level_checkwx(void)
437{
Steven Pricec5cfae12020-02-03 17:36:16 -0800438 ptdump_walk_pgd_level_core(NULL, INIT_PGD, true, false);
Stephen Smalleye1a58322015-10-05 12:55:20 -0400439}
440
/*
 * Late init: patch the marker table entries whose start addresses are
 * only known at runtime.  Returns 0 (initcalls must return int).
 */
static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
	return 0;
}
__initcall(pt_dump_init);