// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

const char *migrate_reason_names[MR_TYPES] = {
        "compaction",
        "memory_failure",
        "memory_hotplug",
        "syscall_or_cpuset",
        "mempolicy_mbind",
        "numa_misplaced",
        "cma",
};
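/*
 * The strings above are indexed by enum migrate_reason and must stay in
 * the same order as that enum; mm/page_owner.c, for example, indexes
 * this array directly when reporting why a page was last migrated.
 */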

const struct trace_print_flags pageflag_names[] = {
        __def_pageflag_names,
        {0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
        __def_gfpflag_names,
        {0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
        __def_vmaflag_names,
        {0, NULL}
};
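/*
 * These tables back the %pGp (page flags), %pGg (gfp flags) and %pGv
 * (vma flags) printk format specifiers in lib/vsprintf.c, which is how
 * the flag fields dumped below are decoded into symbolic names.
 */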

void __dump_page(struct page *page, const char *reason)
{
        struct page *head = compound_head(page);
        struct address_space *mapping;
        bool page_poisoned = PagePoisoned(page);
        bool compound = PageCompound(page);
        /*
         * Accessing the pageblock without the zone lock. It could change to
         * "isolate" again in the meantime, but since we are just dumping the
         * state for debugging, it should be fine to accept a bit of
         * inaccuracy here due to racing.
         */
        bool page_cma = is_migrate_cma_page(page);
        int mapcount;
        char *type = "";

        /*
         * If the struct page is poisoned, don't access the Page*() functions,
         * as that leads to a recursive loop: the Page*() checks detect
         * poisoned pages and call dump_page() in turn.
         */
        if (page_poisoned) {
                pr_warn("page:%px is uninitialized and poisoned", page);
                goto hex_only;
        }

        if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
                /*
                 * Corrupt page, so we cannot call page_mapping. Instead, do a
                 * safe subset of the steps that page_mapping() does. Caution:
                 * this will be misleading for tail pages, PageSwapCache pages,
                 * and potentially other situations. (See the page_mapping()
                 * implementation for what's missing here.)
                 */
                unsigned long tmp = (unsigned long)page->mapping;

                if (tmp & PAGE_MAPPING_ANON)
                        mapping = NULL;
                else
                        mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
                head = page;
                compound = false;
        } else {
                mapping = page_mapping(page);
        }

        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * The page->_mapcount space in struct page is used by sl[aou]b pages
         * to encode their own info.
         */
        mapcount = PageSlab(head) ? 0 : page_mapcount(page);

        pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
                        page, page_ref_count(head), mapcount, mapping,
                        page_to_pgoff(page), page_to_pfn(page));
        if (compound) {
                if (hpage_pincount_available(page)) {
                        pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
                                        head, compound_order(head),
                                        head_mapcount(head),
                                        head_pincount(head));
                } else {
                        pr_warn("head:%p order:%u compound_mapcount:%d\n",
                                        head, compound_order(head),
                                        head_mapcount(head));
                }
        }
        if (PageKsm(page))
                type = "ksm ";
        else if (PageAnon(page))
                type = "anon ";
        else if (mapping) {
                struct inode *host;
                const struct address_space_operations *a_ops;
                struct hlist_node *dentry_first;
                struct dentry *dentry_ptr;
                struct dentry dentry;

                /*
                 * mapping can be an invalid pointer and we don't want to
                 * crash accessing it, so probe everything depending on it
                 * carefully.
                 */
                if (get_kernel_nofault(host, &mapping->host) ||
                    get_kernel_nofault(a_ops, &mapping->a_ops)) {
                        pr_warn("failed to read mapping contents, not a valid kernel address?\n");
                        goto out_mapping;
                }

                if (!host) {
                        pr_warn("aops:%ps\n", a_ops);
                        goto out_mapping;
                }

                if (get_kernel_nofault(dentry_first, &host->i_dentry.first)) {
                        pr_warn("aops:%ps with invalid host inode %px\n",
                                        a_ops, host);
                        goto out_mapping;
                }

                if (!dentry_first) {
                        pr_warn("aops:%ps ino:%lx\n", a_ops, host->i_ino);
                        goto out_mapping;
                }

                dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
                if (get_kernel_nofault(dentry, dentry_ptr)) {
                        pr_warn("aops:%ps with invalid dentry %px\n", a_ops,
                                        dentry_ptr);
                } else {
                        /*
                         * If the dentry is corrupted, the %pd handler may
                         * still crash, but it's unlikely that we reach here
                         * with a corrupted struct page.
                         */
                        pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
                                        a_ops, host->i_ino, &dentry);
                }
        }
out_mapping:
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

        pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
                page_cma ? " CMA" : "");

hex_only:
        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
        if (head != page)
                print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
                                sizeof(unsigned long), head,
                                sizeof(struct page), false);

        if (reason)
                pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
        if (!page_poisoned && page->mem_cgroup)
                pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

void dump_page(struct page *page, const char *reason)
{
        __dump_page(page, reason);
        dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
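/*
 * Usage sketch (illustrative, not a caller from this file): a sanity
 * check that wants the full page state logged before it warns might do
 *
 *	if (unlikely(page_ref_count(page) < 0))
 *		dump_page(page, "negative page refcount");
 *
 * VM_BUG_ON_PAGE() and friends dump the page the same way when their
 * assertion fails.
 */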

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
        pr_emerg("vma %px start %px end %px\n"
                "next %px prev %px mm %px\n"
                "prot %lx anon_vma %px vm_ops %px\n"
                "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
                vma->vm_prev, vma->vm_mm,
                (unsigned long)pgprot_val(vma->vm_page_prot),
                vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
                vma->vm_file, vma->vm_private_data,
                vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
        pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
#endif
                "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                "binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
                "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
                "owner %px "
#endif
                "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
                "notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",

                mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
                mm->get_unmapped_area,
#endif
                mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
                (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
                mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
                mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
                mm->owner,
#endif
                mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
                mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
                atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
}

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
        bool __page_init_poisoning = true;

        /*
         * Calling vm_debug with no arguments is equivalent to requesting
         * to enable all debugging options we can control.
         */
        if (*str++ != '=' || !*str)
                goto out;

        __page_init_poisoning = false;
        if (*str == '-')
                goto out;

        while (*str) {
                switch (tolower(*str)) {
                case 'p':
                        __page_init_poisoning = true;
                        break;
                default:
                        pr_err("vm_debug option '%c' unknown. skipped\n",
                               *str);
                }

                str++;
        }
out:
        if (page_init_poisoning && !__page_init_poisoning)
                pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

        page_init_poisoning = __page_init_poisoning;

        return 1;
}
__setup("vm_debug", setup_vm_debug);
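/*
 * Boot parameter summary, as implemented by the parser above:
 *
 *	vm_debug	enable all options we control (currently only 'p')
 *	vm_debug=-	disable all options
 *	vm_debug=p	enable page struct init poisoning
 *
 * Unknown option characters are reported and skipped.
 */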

void page_init_poison(struct page *page, size_t size)
{
        if (page_init_poisoning)
                memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
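/*
 * PAGE_POISON_PATTERN is the value that PagePoisoned() tests for, so a
 * struct page poisoned here is recognised by __dump_page() above as
 * uninitialized and only dumped as raw hex.
 */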
#endif /* CONFIG_DEBUG_VM */