// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

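/*
 * Per-page tracking data, stored in the page_ext area reserved via
 * page_owner_ops.size. For a high-order allocation, every constituent
 * base page carries its own copy (see __set_page_owner_handle()).
 */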
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	pid_t pid;
};

static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

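/*
 * page_owner is switched on from the kernel command line, e.g.
 * "page_owner=on"; kstrtobool() also accepts the usual 1/0, y/n and
 * yes/no spellings.
 */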
static int __init early_page_owner_param(char *buf)
{
	return kstrtobool(buf, &page_owner_enabled);
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	return page_owner_enabled;
}

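/*
 * Three pre-registered sentinel handles stand in for a real stack trace:
 * dummy_handle marks allocations made recursively from within stack depot
 * itself, failure_handle marks traces that stack depot failed to store,
 * and early_handle marks pages allocated before page_owner was initialized.
 */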
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

static inline bool check_recursive_alloc(unsigned long *entries,
					 unsigned int nr_entries,
					 unsigned long ip)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == ip)
			return true;
	}
	return false;
}

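/*
 * Capture the current call stack and store it in the stack depot,
 * deduplicated against previously seen traces. The skip count of 2 is
 * meant to drop the innermost page_owner frames so the recorded trace
 * starts at the allocation site rather than inside page_owner itself.
 */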
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * We need to check for recursion here because our request to
	 * stackdepot could trigger a memory allocation to save a new
	 * entry. That allocation would reach here and call
	 * stack_depot_save() again if we don't catch it. There would
	 * still not be enough memory in stackdepot, so it would try to
	 * allocate memory again and loop forever.
	 */
	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
		return dummy_handle;

	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

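/*
 * Called when a block of pages is freed: record the freeing stack and
 * timestamp in every base page's page_owner and clear the "allocated"
 * bit, while keeping the last allocation info around for debugging.
 */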
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_ext = page_ext_next(page_ext);
	}
}

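/*
 * Stamp allocation info (stack handle, order, gfp mask, pid, timestamp)
 * into the page_owner of every base page in a 1 << order block.
 */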
static inline void __set_page_owner_handle(struct page_ext *page_ext,
					depot_stack_handle_t handle,
					unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->ts_nsec = local_clock();
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	depot_stack_handle_t handle;

	if (unlikely(!page_ext))
		return;

	handle = save_stack(gfp_mask);
	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}

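/*
 * When a high-order page is split into nr base pages, each base page
 * keeps its original allocation info but is re-stamped as order 0 so
 * the records match the new layout.
 */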
void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}

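/*
 * For /proc/pagetypeinfo: count, per migratetype, the pageblocks that
 * contain at least one page whose allocation migratetype differs from
 * the pageblock's own, i.e. "mixed" blocks that work against
 * fragmentation avoidance.
 */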
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

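/*
 * Format one page's ownership record into a kernel buffer and copy it
 * to userspace, at most PAGE_SIZE bytes per record.
 */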
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->ts_nsec, page_owner->free_ts_nsec);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner allocation stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		pr_alert("page last free stack trace:\n");
		stack_trace_print(entries, nr_entries, 0);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

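/*
 * debugfs read handler: the file offset (*ppos) encodes the next PFN to
 * examine, relative to min_low_pfn. Each read() returns the record for
 * the first allocated page found at or after that PFN, so userspace can
 * stream the whole table with repeated reads.
 */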
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			continue;

		/*
		 * Access to page_owner->handle isn't synchronized, so be
		 * careful when accessing it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

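/*
 * Pages allocated before page_owner was initialized have no owner info.
 * Walk the zone and stamp every such page with the early_handle sentinel
 * so it at least shows up as "early allocated" in dumps.
 */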
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle,
						0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

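/*
 * With debugfs mounted at its usual location, the records can then be
 * streamed from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 */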
static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)