// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Without it, we would have to modify struct page itself to store extra data
 * per page. That requires rebuilding the kernel, which is a really
 * time-consuming process and is sometimes impossible because of third-party
 * module dependencies. On top of that, enlarging struct page could change
 * system behaviour in unwanted ways.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a separate place rather than
 * in struct page itself, and that memory is reached through the accessor
 * functions provided by this code. During boot, the core checks whether a
 * huge chunk of memory is actually needed; if not, it avoids allocating any
 * memory at all. Thanks to this, the feature can be built into the kernel by
 * default without forcing rebuilds or wasting memory on users who never
 * enable it.
 *
 * To make this work there are two callbacks for clients. One is the need
 * callback, which is mandatory if the client wants to avoid useless memory
 * allocation at boot time. The other is the optional init callback, which is
 * used to perform proper initialization after the memory is allocated.
 *
 * The need callback decides whether the extended memory allocation is needed
 * at all. Sometimes users disable certain features for a given boot, and then
 * the extra memory would be unnecessary. To avoid allocating a huge chunk of
 * memory in that case, each client reports its need for extra memory through
 * the need callback. If any need callback returns true, the page extension
 * core allocates memory for page extension. If none of them return true, no
 * memory is needed for this boot and the core skips the allocation, so no
 * memory is wasted.
 *
 * When a need callback returns true, page_ext also checks whether the client
 * requests extra space through the size field in struct page_ext_operations.
 * If it is non-zero, that much extra space is reserved in each page_ext entry
 * and its offset is handed back to the client through the offset field in
 * struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page extension
 * is completely set up. On sparse memory systems the extra memory is
 * allocated some time later than the memmap, i.e. the lifetime of page
 * extension memory is not the same as that of the memmap for struct page.
 * Clients therefore can't store extra data until page extension is
 * initialized, even though pages may already be allocated and in use. That
 * could leave the per-page extra data in an inadequate state, so a client can
 * use this callback to bring it into a correct state.
 */
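
/*
 * Illustrative sketch of a hypothetical client (the "my_feature" names below
 * are made up for this comment and do not exist in the kernel):
 *
 *	static bool need_my_feature(void)
 *	{
 *		return my_feature_enabled;
 *	}
 *
 *	static void init_my_feature(void)
 *	{
 *	}
 *
 *	struct page_ext_operations my_feature_ops = {
 *		.size = sizeof(struct my_feature_data),
 *		.need = need_my_feature,
 *		.init = init_my_feature,
 *	};
 *
 * The need callback would typically key off a boot parameter, and the init
 * callback would set up the per-page state once page_ext is usable. The
 * client is then added to page_ext_ops[] below; after boot, its data for a
 * given page lives at (void *)page_ext + my_feature_ops.offset.
 */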

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

unsigned long page_ext_size = sizeof(struct page_ext);

static unsigned long total_usage;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}
#endif

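/*
 * Entries in a page_ext table are page_ext_size bytes apart: the base
 * struct page_ext plus whatever extra space the enabled clients claimed
 * through their ->size fields at boot.
 */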
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
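	/*
	 * The node's table starts at node_start_pfn rounded down to
	 * MAX_ORDER_NR_PAGES (see alloc_node_page_ext()), so index into it
	 * relative to that rounded-down base.
	 */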
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
	 * checks buddy's status, range could be out of exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

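	/* Try physically contiguous pages on the requested node first. */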
	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

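	/* Fall back to virtually contiguous memory when that fails. */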
	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
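	/*
	 * Store the base biased back by the section-aligned pfn so that
	 * get_entry(section->page_ext, pfn) works with an absolute pfn,
	 * exactly as lookup_page_ext() calls it.
	 */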
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}

static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = page_ext_size * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}
321
David Hildenbranddccacf82020-04-06 20:06:47 -0700322 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
Joonsoo Kimeefa864b2014-12-12 16:55:46 -0800323 fail = init_section_page_ext(pfn, nid);
Joonsoo Kimeefa864b2014-12-12 16:55:46 -0800324 if (!fail)
325 return 0;
326
327 /* rollback */
328 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
329 __free_page_ext(pfn);
330
331 return -ENOMEM;
332}
333
334static int __meminit offline_page_ext(unsigned long start_pfn,
335 unsigned long nr_pages, int nid)
336{
337 unsigned long start, end, pfn;
338
339 start = SECTION_ALIGN_DOWN(start_pfn);
340 end = SECTION_ALIGN_UP(start_pfn + nr_pages);
341
342 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
343 __free_page_ext(pfn);
344 return 0;
345
346}
347
348static int __meminit page_ext_callback(struct notifier_block *self,
349 unsigned long action, void *arg)
350{
351 struct memory_notify *mn = arg;
352 int ret = 0;
353
354 switch (action) {
355 case MEM_GOING_ONLINE:
356 ret = online_page_ext(mn->start_pfn,
357 mn->nr_pages, mn->status_change_nid);
358 break;
359 case MEM_OFFLINE:
360 offline_page_ext(mn->start_pfn,
361 mn->nr_pages, mn->status_change_nid);
362 break;
363 case MEM_CANCEL_ONLINE:
364 offline_page_ext(mn->start_pfn,
365 mn->nr_pages, mn->status_change_nid);
366 break;
367 case MEM_GOING_OFFLINE:
368 break;
369 case MEM_ONLINE:
370 case MEM_CANCEL_OFFLINE:
371 break;
372 }
373
374 return notifier_from_errno(ret);
375}
376
Joonsoo Kimeefa864b2014-12-12 16:55:46 -0800377void __init page_ext_init(void)
378{
379 unsigned long pfn;
380 int nid;
381
382 if (!invoke_need_callbacks())
383 return;
384
385 for_each_node_state(nid, N_MEMORY) {
386 unsigned long start_pfn, end_pfn;
387
388 start_pfn = node_start_pfn(nid);
389 end_pfn = node_end_pfn(nid);
390 /*
391 * start_pfn and end_pfn may not be aligned to SECTION and the
392 * page->flags of out of node pages are not initialized. So we
393 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
394 */
395 for (pfn = start_pfn; pfn < end_pfn;
396 pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
397
398 if (!pfn_valid(pfn))
399 continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif