// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

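/*
 * Layout of the bitmap exported via /sys/kernel/mm/page_idle/bitmap (see
 * Documentation/admin-guide/mm/idle_page_tracking.rst): the file is an
 * array of u64 chunks, and the page with PFN #i maps to bit #i % 64 of
 * chunk #i / 64, in the machine's native byte order.
 */
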
/*
 * Idle page tracking only considers user memory pages; for other page types
 * the idle flag is always unset and an attempt to set it is silently ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are usually not many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct page *page_idle_get_page(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	pg_data_t *pgdat;

	if (!page || !PageLRU(page) ||
	    !get_page_unless_zero(page))
		return NULL;

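	/*
	 * Recheck PageLRU under the node's lru_lock: the page may have been
	 * removed from the LRU (e.g. isolated for reclaim or compaction)
	 * after the unlocked check above, and per the comment above, only
	 * LRU pages are safe to feed to rmap_walk().
	 */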
	pgdat = page_pgdat(page);
	spin_lock_irq(&pgdat->lru_lock);
	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	spin_unlock_irq(&pgdat->lru_lock);
	return page;
}

static bool page_idle_clear_pte_refs_one(struct page *page,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};
	bool referenced = false;

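	/*
	 * Visit every PTE (or PMD, for a PMD-mapped THP) in @vma that maps
	 * @page, clearing and propagating the accessed bit through the MMU
	 * notifiers so that secondary MMUs are covered as well.
	 */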
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/*
			 * For a PTE-mapped THP, if any subpage is
			 * referenced, the whole THP is considered
			 * referenced.
			 */
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				referenced = true;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}
	}

	if (referenced) {
		clear_page_idle(page);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * page_referenced() will return > 0.
		 */
		set_page_young(page);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct page *page)
{
	/*
	 * Since rwc.arg is unused, rwc is effectively immutable, so we
	 * can make it static const to save some cycles and stack.
	 */
	static const struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page_mapped(page) ||
	    !page_rmapping(page))
		return;

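	/*
	 * Anonymous pages are walked under the anon_vma read lock taken by
	 * rmap_walk() itself; file-backed and KSM pages need the page lock
	 * to stabilize page->mapping across the walk.
	 */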
	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		return;

	rmap_walk(page, (struct rmap_walk_control *)&rwc);

	if (need_lock)
		unlock_page(page);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

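	/*
	 * pos and count are byte offsets into the bitmap and must be
	 * aligned to whole u64 chunks; each byte of the file covers
	 * BITS_PER_BYTE pfns.
	 */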
	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		page = page_idle_get_page(pfn);
		if (page) {
			if (page_is_idle(page)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(page);
				if (page_is_idle(page))
					*out |= 1ULL << bit;
			}
			put_page(page);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

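	/*
	 * Unlike reads, which return 0 (EOF) past the end of the bitmap,
	 * writes starting beyond max_pfn fail with -ENXIO.
	 */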
	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = max_pfn;

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			page = page_idle_get_page(pfn);
			if (page) {
				page_idle_clear_pte_refs(page);
				set_page_idle(page);
				put_page(page);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}

static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, 0600,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static const struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};

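/*
 * On 64-bit systems the idle and young bits live in spare page flags; 32-bit
 * systems have no spare flags, so the bits are kept in page_ext instead and
 * the page_ext subsystem must be told to allocate them.
 */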
#ifndef CONFIG_64BIT
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif

static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);
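
/*
 * Example usage (userspace; a minimal sketch, not part of the kernel build):
 * mark a range of pages idle by writing an all-ones chunk, wait while the
 * workload runs, then re-read the bitmap; bits that are still set identify
 * pages that were not referenced in the interval. The chunk offset below is
 * arbitrary, and the program assumes it runs with enough privilege to open
 * the bitmap read-write.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t chunk = ~0ULL;
 *		int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		// Mark pfns [64, 128) idle (chunk #1, byte offset 8).
 *		pwrite(fd, &chunk, sizeof(chunk), 8);
 *		sleep(10);	// let the workload touch its pages
 *		pread(fd, &chunk, sizeof(chunk), 8);
 *		printf("idle pages in chunk: %d\n",
 *		       __builtin_popcountll(chunk));
 *		close(fd);
 *		return 0;
 *	}
 */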