// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
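
/*
 * The bitmap exposed via sysfs is accessed in 64-bit chunks: bit i of the
 * chunk at byte offset 8 * k in the file corresponds to pfn 64 * k + i.
 */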

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct page *page_idle_get_page(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	pg_data_t *pgdat;

	if (!page || !PageLRU(page) ||
	    !get_page_unless_zero(page))
		return NULL;

	pgdat = page_pgdat(page);
	spin_lock_irq(&pgdat->lru_lock);
	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	spin_unlock_irq(&pgdat->lru_lock);
	return page;
}
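/*
 * rmap_walk() callback: clear the accessed bit in each PTE or PMD mapping
 * @page inside @vma. Returning true tells rmap_walk() to keep going so
 * every mapping of the page is visited.
 */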
static bool page_idle_clear_pte_refs_one(struct page *page,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For a PTE-mapped THP, if one sub page is referenced,
			 * the whole THP is considered referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		clear_page_idle(page);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * page_referenced() will return > 0.
		 */
		set_page_young(page);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct page *page)
{
	/*
	 * Since rwc.arg is unused, rwc is effectively immutable, so we
	 * can make it static const to save some cycles and stack.
	 */
	static const struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page_mapped(page) ||
	    !page_rmapping(page))
		return;

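	/*
	 * File and KSM pages need the page lock to stabilise page->mapping
	 * across the rmap walk; anonymous pages are walked under the
	 * anon_vma lock instead.
	 */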
	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		return;

	rmap_walk(page, (struct rmap_walk_control *)&rwc);

	if (need_lock)
		unlock_page(page);
}
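/*
 * Read handler for /sys/kernel/mm/page_idle/bitmap. @pos and @count are in
 * bytes and must be multiples of the 8-byte chunk size. Each bit set in the
 * output marks a pfn whose page is still idle, i.e. it has not been
 * accessed since it was last marked idle.
 */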
static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		page = page_idle_get_page(pfn);
		if (page) {
			if (page_is_idle(page)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(page);
				if (page_is_idle(page))
					*out |= 1ULL << bit;
			}
			put_page(page);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}
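
/*
 * A minimal userspace sketch of reading the bitmap (an illustration, not
 * part of this file; assumes plain POSIX I/O and omits error handling):
 *
 *	uint64_t chunk;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
 *
 *	// Chunk k covers pfns 64 * k .. 64 * k + 63; this reads chunk 0.
 *	pread(fd, &chunk, sizeof(chunk), 0);
 *	if (chunk & (1ULL << 12))
 *		printf("pfn 12 is idle\n");
 *	close(fd);
 */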
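/*
 * Write handler: for each bit set in the input, clear the accessed bits on
 * the corresponding page's mappings and mark the page idle. Zero bits are
 * ignored, so pages cannot be un-marked through this interface.
 */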
static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			page = page_idle_get_page(pfn);
			if (page) {
				page_idle_clear_pte_refs(page);
				set_page_idle(page);
				put_page(page);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}
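
/*
 * A minimal userspace sketch of marking pages idle (an illustration, not
 * part of this file; assumes plain POSIX I/O and omits error handling).
 * Writing all-ones marks every resident user page in the chunk idle:
 *
 *	uint64_t chunk = ~0ULL;
 *	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);
 *
 *	// Chunk index 1, i.e. byte offset 8, covers pfns 64..127.
 *	pwrite(fd, &chunk, sizeof(chunk), 8);
 *	close(fd);
 */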

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};
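/*
 * On 32-bit kernels there is no room in page->flags for the idle and young
 * bits, so they are stored in page_ext flags instead (see
 * <linux/page_idle.h>), which requires registering a page_ext client.
 */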
#ifndef CONFIG_64BIT
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);