// SPDX-License-Identifier: GPL-2.0
/*
 * Free some vmemmap pages of HugeTLB
 *
 * Copyright (c) 2020, Bytedance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 *
 * The struct page structures (page structs) are used to describe a physical
 * page frame. By default, there is a one-to-one mapping from a page frame to
 * its corresponding page struct.
 *
 * HugeTLB pages consist of multiple base page size pages and are supported by
 * many architectures. See hugetlbpage.rst in the Documentation directory for
 * more details. On the x86-64 architecture, HugeTLB pages of size 2MB and 1GB
 * are currently supported. Since the base page size on x86 is 4KB, a 2MB
 * HugeTLB page consists of 512 base pages and a 1GB HugeTLB page consists of
 * 262144 base pages. For each base page, there is a corresponding page struct.
 *
 * Within the HugeTLB subsystem, only the first 4 page structs are used to
 * contain unique information about a HugeTLB page. __NR_USED_SUBPAGE provides
 * this upper limit. The only 'useful' information in the remaining page structs
 * is the compound_head field, and this field is the same for all tail pages.
 *
 * By removing redundant page structs for HugeTLB pages, memory can be returned
 * to the buddy allocator for other uses.
 *
 * Different architectures support different HugeTLB pages. For example, the
 * following table lists the HugeTLB page sizes supported by the x86 and arm64
 * architectures. Because arm64 supports 4k, 16k, and 64k base pages as well as
 * contiguous entries, it supports many sizes of HugeTLB page.
 *
 * +--------------+-----------+-----------------------------------------------+
 * | Architecture | Page Size |                HugeTLB Page Size              |
 * +--------------+-----------+-----------+-----------+-----------+-----------+
 * |    x86-64    |    4KB    |    2MB    |    1GB    |           |           |
 * +--------------+-----------+-----------+-----------+-----------+-----------+
 * |              |    4KB    |   64KB    |    2MB    |    32MB   |    1GB    |
 * |              +-----------+-----------+-----------+-----------+-----------+
 * |    arm64     |   16KB    |    2MB    |    32MB   |    1GB    |           |
 * |              +-----------+-----------+-----------+-----------+-----------+
 * |              |   64KB    |    2MB    |   512MB   |   16GB    |           |
 * +--------------+-----------+-----------+-----------+-----------+-----------+
 *
 * When the system boots up, every HugeTLB page has more than one page of
 * struct page structs, whose size (unit: pages) is:
 *
 *    struct_size = HugeTLB_Size / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
 *
 * Where HugeTLB_Size is the size of the HugeTLB page. We know that the size
 * of the HugeTLB page is always n times PAGE_SIZE. So we can get the following
 * relationship.
 *
 *    HugeTLB_Size = n * PAGE_SIZE
 *
 * Then,
 *
 *    struct_size = n * PAGE_SIZE / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
 *                = n * sizeof(struct page) / PAGE_SIZE
 *
 * We can use a huge mapping at the pud/pmd level for the HugeTLB page.
 *
 * For the HugeTLB page of the pmd level mapping:
 *
 *    struct_size = n * sizeof(struct page) / PAGE_SIZE
 *                = PAGE_SIZE / sizeof(pte_t) * sizeof(struct page) / PAGE_SIZE
 *                = sizeof(struct page) / sizeof(pte_t)
 *                = 64 / 8
 *                = 8 (pages)
 *
 * Where n is how many pte entries one page can contain, so the value of n is
 * (PAGE_SIZE / sizeof(pte_t)).
 *
 * This optimization only supports 64-bit systems, so the value of sizeof(pte_t)
 * is 8. The optimization is also applicable only when the size of struct page
 * is a power of two. In most cases, the size of struct page is 64 bytes (e.g.
 * x86-64 and arm64). So if we use a pmd level mapping for a HugeTLB page, its
 * struct page structs occupy 8 page frames, whose size depends on the size of
 * the base page.
 *
 * For the HugeTLB page of the pud level mapping:
 *
 *    struct_size = PAGE_SIZE / sizeof(pmd_t) * struct_size(pmd)
 *                = PAGE_SIZE / 8 * 8 (pages)
 *                = PAGE_SIZE (pages)
 *
 * Where struct_size(pmd) is the size of the struct page structs of a
 * HugeTLB page of the pmd level mapping.
 *
 * E.g.: the struct page structs of a 2MB HugeTLB page on x86_64 occupy 8 page
 * frames, while those of a 1GB HugeTLB page occupy 4096.
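 *
 * The 1GB figure follows directly from the formula above:
 *
 *    struct_size = 1GB / 4KB * sizeof(struct page) / PAGE_SIZE
 *                = 262144 * 64 / 4096
 *                = 4096 (pages)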
 *
 * Next, we take the pmd level mapping of the HugeTLB page as an example to
 * show the internal implementation of this optimization. There are 8 pages of
 * struct page structs associated with a HugeTLB page which is pmd mapped.
 *
 * Here is how things look before optimization.
 *
 *    HugeTLB                  struct pages(8 pages)         page frame(8 pages)
 * +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+
 * |           |                     |     0     | -------------> |     0     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     1     | -------------> |     1     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     2     | -------------> |     2     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     3     | -------------> |     3     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     4     | -------------> |     4     |
 * |    PMD    |                     +-----------+                +-----------+
 * |   level   |                     |     5     | -------------> |     5     |
 * |  mapping  |                     +-----------+                +-----------+
 * |           |                     |     6     | -------------> |     6     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     7     | -------------> |     7     |
 * |           |                     +-----------+                +-----------+
 * |           |
 * |           |
 * |           |
 * +-----------+
 *
 * The value of page->compound_head is the same for all tail pages. The first
 * page of page structs (page 0) associated with the HugeTLB page contains the 4
 * page structs necessary to describe the HugeTLB. The only use of the remaining
 * pages of page structs (page 1 to page 7) is to point to page->compound_head.
 * Therefore, we can remap pages 2 to 7 to page 1. Only 2 pages of page structs
 * will be used for each HugeTLB page. This will allow us to free the remaining
 * 6 pages to the buddy allocator.
 *
 * Here is how things look after remapping.
 *
 *    HugeTLB                  struct pages(8 pages)         page frame(8 pages)
 * +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+
 * |           |                     |     0     | -------------> |     0     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     1     | -------------> |     1     |
 * |           |                     +-----------+                +-----------+
 * |           |                     |     2     | ----------------^ ^ ^ ^ ^ ^
 * |           |                     +-----------+                   | | | | |
 * |           |                     |     3     | ------------------+ | | | |
 * |           |                     +-----------+                     | | | |
 * |           |                     |     4     | --------------------+ | | |
 * |    PMD    |                     +-----------+                       | | |
 * |   level   |                     |     5     | ----------------------+ | |
 * |  mapping  |                     +-----------+                         | |
 * |           |                     |     6     | ------------------------+ |
 * |           |                     +-----------+                           |
 * |           |                     |     7     | --------------------------+
 * |           |                     +-----------+
 * |           |
 * |           |
 * |           |
 * +-----------+
 *
 * When a HugeTLB page is freed to the buddy system, we should allocate 6 pages
 * for vmemmap pages and restore the previous mapping relationship.
 *
 * The HugeTLB page of the pud level mapping is similar to the former. We can
 * also use this approach to free (PAGE_SIZE - 2) vmemmap pages.
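 *
 * On x86-64, for example, that is 4096 - 2 = 4094 vmemmap pages (roughly 16MB)
 * returned to the buddy allocator for every 1GB HugeTLB page.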
 *
 * Apart from the HugeTLB page of the pmd/pud level mapping, some architectures
 * (e.g. aarch64) provide a contiguous bit in the translation table entries
 * that hints to the MMU to indicate that it is one of a contiguous set of
 * entries that can be cached in a single TLB entry.
 *
 * The contiguous bit is used to increase the mapping size at the pmd and pte
 * (last) level. So this type of HugeTLB page can be optimized only when the
 * size of its struct page structs is greater than 2 pages.
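 *
 * For example, with 4KB base pages on arm64, a contiguous-PTE HugeTLB page is
 * 64KB and its struct page structs occupy only 16 * 64 bytes = 1KB (less than
 * one page), so nothing can be freed, whereas a contiguous-PMD HugeTLB page is
 * 32MB and its struct page structs occupy 8192 * 64 / 4KB = 128 pages, which
 * can be optimized.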
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include "hugetlb_vmemmap.h"

/*
 * There are a lot of struct page structures associated with each HugeTLB page.
 * For tail pages, the value of compound_head is the same. So we can reuse the
 * first page of tail page structures. We map the virtual addresses of the
 * remaining pages of tail page structures to the first tail page struct, and
 * then free these page frames. Therefore, we need to reserve two pages as
 * vmemmap areas.
 */
#define RESERVE_VMEMMAP_NR		2U
#define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)

bool hugetlb_free_vmemmap_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);

static int __init early_hugetlb_free_vmemmap_param(char *buf)
{
	/* We cannot optimize if a "struct page" crosses page boundaries. */
	if (!is_power_of_2(sizeof(struct page))) {
		pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
		return 0;
	}

	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		hugetlb_free_vmemmap_enabled = true;
	else if (!strcmp(buf, "off"))
		hugetlb_free_vmemmap_enabled = false;
	else
		return -EINVAL;

	return 0;
}
early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
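
/*
 * Example usage, assuming CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is built in: boot
 * with "hugetlb_free_vmemmap=on" on the kernel command line to enable the
 * optimization, or "hugetlb_free_vmemmap=off" to disable it. Without the
 * parameter, the default follows CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON.
 */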

static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
{
	return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
}
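
/*
 * For instance, for the 2MB hstate on x86-64, hugetlb_vmemmap_init() below
 * computes 8 vmemmap pages and reserves RESERVE_VMEMMAP_NR = 2 of them, so
 * this helper returns 6 << PAGE_SHIFT = 24KB.
 */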

/*
 * Previously discarded vmemmap pages will be allocated and remapped after this
 * function returns zero.
 */
int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_addr = (unsigned long)head;
	unsigned long vmemmap_end, vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
	vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_addr,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);

	if (!ret)
		ClearHPageVmemmapOptimized(head);

	return ret;
}

void free_huge_page_vmemmap(struct hstate *h, struct page *head)
{
	unsigned long vmemmap_addr = (unsigned long)head;
	unsigned long vmemmap_end, vmemmap_reuse;

	if (!free_vmemmap_pages_per_hpage(h))
		return;

	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
	vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_addr, @vmemmap_end) is mapped to.
	 */
	if (!vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
		SetHPageVmemmapOptimized(head);
}
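
/*
 * Worked example of the address arithmetic above, assuming a 2MB HugeTLB page
 * on x86-64 with 4KB base pages (8 vmemmap pages, 2 of them reserved), where
 * "head" stands for the vmemmap address (unsigned long)head:
 *
 *	vmemmap_addr  = head + RESERVE_VMEMMAP_SIZE	= head + 8KB
 *	vmemmap_end   = vmemmap_addr + 6 * PAGE_SIZE	= head + 32KB
 *	vmemmap_reuse = vmemmap_addr - PAGE_SIZE	= head + 4KB
 *
 * That is, the range [head + 8KB, head + 32KB) is remapped to the page frame
 * backing head + 4KB, and the 6 page frames it previously used are returned
 * to the buddy allocator.
 */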

void __init hugetlb_vmemmap_init(struct hstate *h)
{
	unsigned int nr_pages = pages_per_huge_page(h);
	unsigned int vmemmap_pages;

	/*
	 * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
	 * page structs that can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
	 * is enabled, so add a BUILD_BUG_ON to catch invalid usage of the
	 * tail struct page.
	 */
	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
		     RESERVE_VMEMMAP_SIZE / sizeof(struct page));

	if (!hugetlb_free_vmemmap_enabled)
		return;

	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
	/*
	 * The head page and the first tail page are not to be freed to the
	 * buddy allocator, the other pages will map to the first tail page,
	 * so they can be freed.
	 *
	 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
	 * on some architectures (e.g. aarch64). See Documentation/arm64/
	 * hugetlbpage.rst for more details.
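	 *
	 * For example, for the 2MB hstate on x86-64, vmemmap_pages is
	 * 512 * 64 >> 12 = 8, so nr_free_vmemmap_pages ends up as 8 - 2 = 6.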
	 */
	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

	pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
		h->name);
}