// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

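/*
 * Freed-state tracking: a hugetlb page sitting on one of the per-node free
 * lists is tagged by storing -1UL in the page_private field of the subpage
 * at head + 4, presumably because the earlier tail pages' fields are already
 * in use for compound-page and hugetlb-cgroup bookkeeping.  The helpers
 * below set, clear and test that tag.
 */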
static inline bool PageHugeFreed(struct page *head)
{
	return page_private(head + 4) == -1UL;
}

static inline void SetPageHugeFreed(struct page *head)
{
	set_page_private(head + 4, -1UL);
}

static inline void ClearPageHugeFreed(struct page *head)
{
	set_page_private(head + 4, 0);
}

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
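/*
 * Worked example: with min_hpages = 10 and rsv_hpages = 4, a request for
 * delta = 6 pages consumes the 4 pages still reserved for the subpool
 * (rsv_hpages becomes 0) and returns 2, the number of pages the caller
 * must additionally take from the global pool.
 */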
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
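/*
 * Worked example: with min_hpages = 10, used_hpages already below that
 * minimum and rsv_hpages = 4, freeing delta = 3 pages keeps all 3 as part
 * of the subpool's minimum reserve (rsv_hpages becomes 7) and returns 0,
 * so no global reservations are released.
 */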
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might
		 * already be scattered across several file_regions residing
		 * in it, so many file_regions would end up sharing a single
		 * css reference. To ensure that each file_region holds exactly
		 * one h_cg->css reference, do a css_get for each file_region
		 * and leave the reference held by the caller untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg && org &&
	       rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. In that case,
 * *regions_needed reports how many file_regions the cache must contain in
 * order to add the regions for this range.
 */
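/*
 * Worked example: if the map already contains [2, 5) and [8, 10), calling
 * this for [f, t) = [0, 12) either inserts the missing pieces [0, 2),
 * [5, 8) and [10, 12) (regions_needed == NULL) or reports
 * *regions_needed = 3, and returns 7 in both cases.
 */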
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, rg->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(rg, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (rg->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (rg->to > last_accounted_offset)
				last_accounted_offset = rg->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (rg->from > t)
			break;

		/* Add an entry for last_accounted_offset -> rg->from, and
		 * update last_accounted_offset.
		 */
		if (rg->from > last_accounted_offset) {
			add += rg->from - last_accounted_offset;
			if (!regions_needed) {
				nrg = get_file_region_entry_from_cache(
					resv, last_accounted_offset, rg->from);
				record_hugetlb_cgroup_uncharge_info(h_cg, h,
								    resv, nrg);
				list_add(&nrg->link, rg->link.prev);
				coalesce_file_region(resv, nrg);
			} else
				*regions_needed += 1;
		}

		last_accounted_offset = rg->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (last_accounted_offset < t) {
		add += t - last_accounted_offset;
		if (!regions_needed) {
			nrg = get_file_region_entry_from_cache(
				resv, last_accounted_offset, t);
			record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
			list_add(&nrg->link, rg->link.prev);
			coalesce_file_region(resv, nrg);
		} else
			*regions_needed += 1;
	}

	VM_BUG_ON(add < 0);
	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only need
		 * to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is
 * greater than or equal to zero.  If file_region entries had to be allocated
 * for this operation and the allocation failed, -ENOMEM is returned.
 * region_add for a region of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		resv->adds_in_progress +
			(actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as
 * placeholders, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to
 * resv->adds_in_progress.  This value needs to be provided to a follow-up
 * call to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
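/*
 * Worked example: deleting [f, t) = [3, 5) from a map containing [0, 10)
 * splits that region into [0, 3) and [5, 10) and returns 2; the split is
 * the only case that may require a new file_region allocation.
 */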
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {		/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out-of-memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
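/*
 * Worked example: for a 2MB hstate with 4KB base pages, an address 4MB past
 * vm_start in a VMA with vm_pgoff = 1024 (a 4MB file offset in PAGE_SIZE
 * units) yields 2 + 2 = 4, the offset into the file in huge-page units.
 */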
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservations, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
				 unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because the allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetPageHugeFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (nocma && is_migrate_cma_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearPageHugeFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
1245
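/*
 * Typical usage of the two iterators above, mirroring alloc_pool_huge_page()
 * and free_pool_huge_page() below: make at most nodes_weight(*mask) attempts,
 * starting from the per-hstate round-robin cursor, and stop as soon as one
 * node succeeds:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, gfp_mask, node,
 *					     nodes_allowed, NULL);
 *		if (page)
 *			break;
 *	}
 *
 * The cursor advances on every iteration, so successive calls spread the
 * allocations and frees across all allowed nodes.
 */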
Aneesh Kumar K.Ve1073d12017-07-06 15:39:17 -07001246#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001247static void destroy_compound_gigantic_page(struct page *page,
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001248 unsigned int order)
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001249{
1250 int i;
1251 int nr_pages = 1 << order;
1252 struct page *p = page + 1;
1253
Gerald Schaeferc8cc7082016-06-24 14:50:04 -07001254 atomic_set(compound_mapcount_ptr(page), 0);
John Hubbard47e29d32020-04-01 21:05:33 -07001255 if (hpage_pincount_available(page))
1256 atomic_set(compound_pincount_ptr(page), 0);
1257
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001258 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001259 clear_compound_head(p);
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001260 set_page_refcounted(p);
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001261 }
1262
1263 set_compound_order(page, 0);
Gerald Schaeferba9c1202020-12-11 13:36:53 -08001264 page[1].compound_nr = 0;
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001265 __ClearPageHead(page);
1266}
1267
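/*
 * destroy_compound_gigantic_page() above is the inverse of
 * prep_compound_gigantic_page(): it clears the compound_head pointer and
 * restores a reference count on each tail page, then wipes the head page's
 * compound metadata, so the range can be handed back to
 * cma_release()/free_contig_range() as ordinary base pages.
 */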
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001268static void free_gigantic_page(struct page *page, unsigned int order)
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001269{
Roman Gushchincf11e852020-04-10 14:32:45 -07001270 /*
1271 * If the page isn't allocated using the cma allocator,
1272 * cma_release() returns false.
1273 */
Barry Songdbda8fe2020-07-23 21:15:30 -07001274#ifdef CONFIG_CMA
1275 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
Roman Gushchincf11e852020-04-10 14:32:45 -07001276 return;
Barry Songdbda8fe2020-07-23 21:15:30 -07001277#endif
Roman Gushchincf11e852020-04-10 14:32:45 -07001278
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001279 free_contig_range(page_to_pfn(page), 1 << order);
1280}
1281
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07001282#ifdef CONFIG_CONTIG_ALLOC
Michal Hockod9cc948f2018-01-31 16:20:44 -08001283static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1284 int nid, nodemask_t *nodemask)
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001285{
Anshuman Khandual5e27a2d2019-11-30 17:55:06 -08001286 unsigned long nr_pages = 1UL << huge_page_order(h);
Li Xinhai953f0642020-09-04 16:36:10 -07001287 if (nid == NUMA_NO_NODE)
1288 nid = numa_mem_id();
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001289
Barry Songdbda8fe2020-07-23 21:15:30 -07001290#ifdef CONFIG_CMA
1291 {
Roman Gushchincf11e852020-04-10 14:32:45 -07001292 struct page *page;
1293 int node;
1294
Li Xinhai953f0642020-09-04 16:36:10 -07001295 if (hugetlb_cma[nid]) {
1296 page = cma_alloc(hugetlb_cma[nid], nr_pages,
1297 huge_page_order(h), true);
Roman Gushchincf11e852020-04-10 14:32:45 -07001298 if (page)
1299 return page;
1300 }
Li Xinhai953f0642020-09-04 16:36:10 -07001301
1302 if (!(gfp_mask & __GFP_THISNODE)) {
1303 for_each_node_mask(node, *nodemask) {
1304 if (node == nid || !hugetlb_cma[node])
1305 continue;
1306
1307 page = cma_alloc(hugetlb_cma[node], nr_pages,
1308 huge_page_order(h), true);
1309 if (page)
1310 return page;
1311 }
1312 }
Roman Gushchincf11e852020-04-10 14:32:45 -07001313 }
Barry Songdbda8fe2020-07-23 21:15:30 -07001314#endif
Roman Gushchincf11e852020-04-10 14:32:45 -07001315
Anshuman Khandual5e27a2d2019-11-30 17:55:06 -08001316 return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001317}
1318
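/*
 * Allocation strategy of alloc_gigantic_page() above: try the CMA area of
 * the requested node first, then (unless __GFP_THISNODE is set) the CMA
 * areas of the other nodes in *nodemask, and only then fall back to
 * alloc_contig_pages() on regular buddy-managed memory.
 */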
1319static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001320static void prep_compound_gigantic_page(struct page *page, unsigned int order);
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07001321#else /* !CONFIG_CONTIG_ALLOC */
1322static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1323 int nid, nodemask_t *nodemask)
1324{
1325 return NULL;
1326}
1327#endif /* CONFIG_CONTIG_ALLOC */
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001328
Aneesh Kumar K.Ve1073d12017-07-06 15:39:17 -07001329#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
Michal Hockod9cc948f2018-01-31 16:20:44 -08001330static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07001331 int nid, nodemask_t *nodemask)
1332{
1333 return NULL;
1334}
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001335static inline void free_gigantic_page(struct page *page, unsigned int order) { }
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001336static inline void destroy_compound_gigantic_page(struct page *page,
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001337 unsigned int order) { }
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001338#endif
1339
Andi Kleena5516432008-07-23 21:27:41 -07001340static void update_and_free_page(struct hstate *h, struct page *page)
Adam Litke6af2acb2007-10-16 01:26:16 -07001341{
1342 int i;
Mike Kravetz65f6dc32021-02-24 12:07:50 -08001343 struct page *subpage = page;
Andi Kleena5516432008-07-23 21:27:41 -07001344
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07001345 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001346 return;
Andy Whitcroft18229df2008-11-06 12:53:27 -08001347
Andi Kleena5516432008-07-23 21:27:41 -07001348 h->nr_huge_pages--;
1349 h->nr_huge_pages_node[page_to_nid(page)]--;
Mike Kravetz65f6dc32021-02-24 12:07:50 -08001350 for (i = 0; i < pages_per_huge_page(h);
1351 i++, subpage = mem_map_next(subpage, page, i)) {
1352 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
Chris Forbes32f84522011-07-25 17:12:14 -07001353 1 << PG_referenced | 1 << PG_dirty |
Luiz Capitulinoa7407a22014-06-04 16:07:09 -07001354 1 << PG_active | 1 << PG_private |
1355 1 << PG_writeback);
Adam Litke6af2acb2007-10-16 01:26:16 -07001356 }
Sasha Levin309381fea2014-01-23 15:52:54 -08001357 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
Mina Almasry1adc4d42020-04-01 21:11:15 -07001358 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
Kirill A. Shutemovf1e61552015-11-06 16:29:50 -08001359 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
Adam Litke6af2acb2007-10-16 01:26:16 -07001360 set_page_refcounted(page);
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001361 if (hstate_is_gigantic(h)) {
Roman Gushchincf11e852020-04-10 14:32:45 -07001362 /*
1363 * Temporarily drop the hugetlb_lock, because
1364 * we might block in free_gigantic_page().
1365 */
1366 spin_unlock(&hugetlb_lock);
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001367 destroy_compound_gigantic_page(page, huge_page_order(h));
1368 free_gigantic_page(page, huge_page_order(h));
Roman Gushchincf11e852020-04-10 14:32:45 -07001369 spin_lock(&hugetlb_lock);
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001370 } else {
Luiz Capitulino944d9fe2014-06-04 16:07:13 -07001371 __free_pages(page, huge_page_order(h));
1372 }
Adam Litke6af2acb2007-10-16 01:26:16 -07001373}
1374
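/*
 * update_and_free_page() above strips the hugetlb metadata from a page that
 * has already been taken off the hstate free/active lists and returns the
 * memory to its origin: gigantic pages go through
 * destroy_compound_gigantic_page() and free_gigantic_page() (temporarily
 * dropping hugetlb_lock, since the CMA release may block), all other pages
 * go back to the buddy allocator via __free_pages().
 */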
Andi Kleene5ff2152008-07-23 21:27:42 -07001375struct hstate *size_to_hstate(unsigned long size)
1376{
1377 struct hstate *h;
1378
1379 for_each_hstate(h) {
1380 if (huge_page_size(h) == size)
1381 return h;
1382 }
1383 return NULL;
1384}
1385
Naoya Horiguchibcc54222015-04-15 16:14:38 -07001386/*
1387 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1388 * to hstate->hugepage_activelist).
1389 *
1390 * This function can be called for tail pages, but never returns true for them.
1391 */
1392bool page_huge_active(struct page *page)
1393{
Muchun Songeca84eb2021-02-04 18:32:13 -08001394 return PageHeadHuge(page) && PagePrivate(&page[1]);
Naoya Horiguchibcc54222015-04-15 16:14:38 -07001395}
1396
1397/* never called for tail page */
Muchun Songafe6c312021-02-04 18:32:03 -08001398void set_page_huge_active(struct page *page)
Naoya Horiguchibcc54222015-04-15 16:14:38 -07001399{
1400 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1401 SetPagePrivate(&page[1]);
1402}
1403
1404static void clear_page_huge_active(struct page *page)
1405{
1406 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1407 ClearPagePrivate(&page[1]);
1408}
1409
Michal Hockoab5ac902018-01-31 16:20:48 -08001410/*
1411 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1412 * code
1413 */
1414static inline bool PageHugeTemporary(struct page *page)
1415{
1416 if (!PageHuge(page))
1417 return false;
1418
1419 return (unsigned long)page[2].mapping == -1U;
1420}
1421
1422static inline void SetPageHugeTemporary(struct page *page)
1423{
1424 page[2].mapping = (void *)-1U;
1425}
1426
1427static inline void ClearPageHugeTemporary(struct page *page)
1428{
1429 page[2].mapping = NULL;
1430}
1431
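/*
 * "Temporary" hugetlb pages are encoded by storing -1 in page[2].mapping of
 * the compound page. They back surplus-overflow and migration-target
 * allocations that must not be returned to the pool: __free_huge_page()
 * below frees them straight away via update_and_free_page() instead of
 * enqueueing them on an hstate free list.
 */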
Waiman Longc77c0a82020-01-04 13:00:15 -08001432static void __free_huge_page(struct page *page)
David Gibson27a85ef2006-03-22 00:08:56 -08001433{
Andi Kleena5516432008-07-23 21:27:41 -07001434 /*
1435 * Can't pass hstate in here because it is called from the
1436 * compound page destructor.
1437 */
Andi Kleene5ff2152008-07-23 21:27:42 -07001438 struct hstate *h = page_hstate(page);
Adam Litke7893d1d2007-10-16 01:26:18 -07001439 int nid = page_to_nid(page);
David Gibson90481622012-03-21 16:34:12 -07001440 struct hugepage_subpool *spool =
1441 (struct hugepage_subpool *)page_private(page);
Joonsoo Kim07443a82013-09-11 14:21:58 -07001442 bool restore_reserve;
David Gibson27a85ef2006-03-22 00:08:56 -08001443
Mike Kravetzb4330af2016-02-05 15:36:38 -08001444 VM_BUG_ON_PAGE(page_count(page), page);
1445 VM_BUG_ON_PAGE(page_mapcount(page), page);
Yongkai Wu8ace22b2018-12-14 14:17:10 -08001446
1447 set_page_private(page, 0);
1448 page->mapping = NULL;
Joonsoo Kim07443a82013-09-11 14:21:58 -07001449 restore_reserve = PagePrivate(page);
Joonsoo Kim16c794b2013-10-16 13:46:48 -07001450 ClearPagePrivate(page);
David Gibson27a85ef2006-03-22 00:08:56 -08001451
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07001452 /*
Mike Kravetz0919e1b2019-05-13 17:19:38 -07001453 * If PagePrivate() was set on page, page allocation consumed a
1454 * reservation. If the page was associated with a subpool, there
1455 * would have been a page reserved in the subpool before allocation
1456 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1457	 * reservation, do not call hugepage_subpool_put_pages() as this will
1458 * remove the reserved page from the subpool.
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07001459 */
Mike Kravetz0919e1b2019-05-13 17:19:38 -07001460 if (!restore_reserve) {
1461 /*
1462 * A return code of zero implies that the subpool will be
1463		 * after the page is freed.  Therefore, force the restore_reserve
1464 * after page is free. Therefore, force restore_reserve
1465 * operation.
1466 */
1467 if (hugepage_subpool_put_pages(spool, 1) == 0)
1468 restore_reserve = true;
1469 }
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07001470
David Gibson27a85ef2006-03-22 00:08:56 -08001471 spin_lock(&hugetlb_lock);
Naoya Horiguchibcc54222015-04-15 16:14:38 -07001472 clear_page_huge_active(page);
Aneesh Kumar K.V6d76dcf2012-07-31 16:42:18 -07001473 hugetlb_cgroup_uncharge_page(hstate_index(h),
1474 pages_per_huge_page(h), page);
Mina Almasry08cf9fa2020-04-01 21:11:31 -07001475 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1476 pages_per_huge_page(h), page);
Joonsoo Kim07443a82013-09-11 14:21:58 -07001477 if (restore_reserve)
1478 h->resv_huge_pages++;
1479
Michal Hockoab5ac902018-01-31 16:20:48 -08001480 if (PageHugeTemporary(page)) {
1481 list_del(&page->lru);
1482 ClearPageHugeTemporary(page);
1483 update_and_free_page(h, page);
1484 } else if (h->surplus_huge_pages_node[nid]) {
Aneesh Kumar K.V0edaecf2012-07-31 16:42:07 -07001485 /* remove the page from active list */
1486 list_del(&page->lru);
Andi Kleena5516432008-07-23 21:27:41 -07001487 update_and_free_page(h, page);
1488 h->surplus_huge_pages--;
1489 h->surplus_huge_pages_node[nid]--;
Adam Litke7893d1d2007-10-16 01:26:18 -07001490 } else {
Will Deacon5d3a5512012-10-08 16:29:32 -07001491 arch_clear_hugepage_flags(page);
Andi Kleena5516432008-07-23 21:27:41 -07001492 enqueue_huge_page(h, page);
Adam Litke7893d1d2007-10-16 01:26:18 -07001493 }
David Gibson27a85ef2006-03-22 00:08:56 -08001494 spin_unlock(&hugetlb_lock);
1495}
1496
Waiman Longc77c0a82020-01-04 13:00:15 -08001497/*
1498 * As free_huge_page() can be called from a non-task context, we have
1499 * to defer the actual freeing in a workqueue to prevent potential
1500 * hugetlb_lock deadlock.
1501 *
1502 * free_hpage_workfn() locklessly retrieves the linked list of pages to
1503 * be freed and frees them one-by-one. As the page->mapping pointer is
1504 * going to be cleared in __free_huge_page() anyway, it is reused as the
1505 * llist_node structure of a lockless linked list of huge pages to be freed.
1506 */
1507static LLIST_HEAD(hpage_freelist);
1508
1509static void free_hpage_workfn(struct work_struct *work)
1510{
1511 struct llist_node *node;
1512 struct page *page;
1513
1514 node = llist_del_all(&hpage_freelist);
1515
1516 while (node) {
1517 page = container_of((struct address_space **)node,
1518 struct page, mapping);
1519 node = node->next;
1520 __free_huge_page(page);
1521 }
1522}
1523static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1524
1525void free_huge_page(struct page *page)
1526{
1527 /*
1528 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
1529 */
1530 if (!in_task()) {
1531 /*
1532 * Only call schedule_work() if hpage_freelist is previously
1533 * empty. Otherwise, schedule_work() had been called but the
1534 * workfn hasn't retrieved the list yet.
1535 */
1536 if (llist_add((struct llist_node *)&page->mapping,
1537 &hpage_freelist))
1538 schedule_work(&free_hpage_work);
1539 return;
1540 }
1541
1542 __free_huge_page(page);
1543}
1544
Andi Kleena5516432008-07-23 21:27:41 -07001545static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
Andi Kleenb7ba30c2008-07-23 21:27:40 -07001546{
Aneesh Kumar K.V0edaecf2012-07-31 16:42:07 -07001547 INIT_LIST_HEAD(&page->lru);
Kirill A. Shutemovf1e61552015-11-06 16:29:50 -08001548 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
Aneesh Kumar K.V9dd540e2012-07-31 16:42:15 -07001549 set_hugetlb_cgroup(page, NULL);
Mina Almasry1adc4d42020-04-01 21:11:15 -07001550 set_hugetlb_cgroup_rsvd(page, NULL);
Wei Yang2f375112020-10-13 16:56:36 -07001551 spin_lock(&hugetlb_lock);
Andi Kleena5516432008-07-23 21:27:41 -07001552 h->nr_huge_pages++;
1553 h->nr_huge_pages_node[nid]++;
Muchun Songe334b1f2021-02-04 18:32:06 -08001554 ClearPageHugeFreed(page);
Andi Kleenb7ba30c2008-07-23 21:27:40 -07001555 spin_unlock(&hugetlb_lock);
Andi Kleenb7ba30c2008-07-23 21:27:40 -07001556}
1557
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001558static void prep_compound_gigantic_page(struct page *page, unsigned int order)
Wu Fengguang20a03072009-06-16 15:32:22 -07001559{
1560 int i;
1561 int nr_pages = 1 << order;
1562 struct page *p = page + 1;
1563
1564 /* we rely on prep_new_huge_page to set the destructor */
1565 set_compound_order(page, order);
Andrea Arcangelief5a22b2013-10-16 13:46:56 -07001566 __ClearPageReserved(page);
Kirill A. Shutemovde09d312016-01-15 16:51:42 -08001567 __SetPageHead(page);
Wu Fengguang20a03072009-06-16 15:32:22 -07001568 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
Andrea Arcangelief5a22b2013-10-16 13:46:56 -07001569 /*
1570 * For gigantic hugepages allocated through bootmem at
1571 * boot, it's safer to be consistent with the not-gigantic
1572 * hugepages and clear the PG_reserved bit from all tail pages
Ethon Paul7c8de352020-06-04 16:49:07 -07001573 * too. Otherwise drivers using get_user_pages() to access tail
Andrea Arcangelief5a22b2013-10-16 13:46:56 -07001574 * pages may get the reference counting wrong if they see
1575 * PG_reserved set on a tail page (despite the head page not
1576 * having PG_reserved set). Enforcing this consistency between
1577 * head and tail pages allows drivers to optimize away a check
1578		 * on the head page when they need to know if put_page() is needed
1579 * after get_user_pages().
1580 */
1581 __ClearPageReserved(p);
Youquan Song58a84aa2011-12-08 14:34:18 -08001582 set_page_count(p, 0);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001583 set_compound_head(p, page);
Wu Fengguang20a03072009-06-16 15:32:22 -07001584 }
Mike Kravetzb4330af2016-02-05 15:36:38 -08001585 atomic_set(compound_mapcount_ptr(page), -1);
John Hubbard47e29d32020-04-01 21:05:33 -07001586
1587 if (hpage_pincount_available(page))
1588 atomic_set(compound_pincount_ptr(page), 0);
Wu Fengguang20a03072009-06-16 15:32:22 -07001589}
1590
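/*
 * Gigantic pages come from alloc_contig_pages()/CMA rather than the buddy
 * allocator, so prep_compound_gigantic_page() above has to mark up the tail
 * pages by hand: clear PG_reserved, zero the refcount and point
 * compound_head at the head page. The hugetlb destructor itself is
 * installed afterwards by prep_new_huge_page().
 */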
Andrew Morton77959122012-10-08 16:34:11 -07001591/*
1592 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1593 * transparent huge pages. See the PageTransHuge() documentation for more
1594 * details.
1595 */
Wu Fengguang20a03072009-06-16 15:32:22 -07001596int PageHuge(struct page *page)
1597{
Wu Fengguang20a03072009-06-16 15:32:22 -07001598 if (!PageCompound(page))
1599 return 0;
1600
1601 page = compound_head(page);
Kirill A. Shutemovf1e61552015-11-06 16:29:50 -08001602 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
Wu Fengguang20a03072009-06-16 15:32:22 -07001603}
Naoya Horiguchi43131e12010-05-28 09:29:22 +09001604EXPORT_SYMBOL_GPL(PageHuge);
1605
Andrea Arcangeli27c73ae2013-11-21 14:32:02 -08001606/*
1607 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1608 * normal or transparent huge pages.
1609 */
1610int PageHeadHuge(struct page *page_head)
1611{
Andrea Arcangeli27c73ae2013-11-21 14:32:02 -08001612 if (!PageHead(page_head))
1613 return 0;
1614
Vlastimil Babkad4af73e2020-04-01 21:11:48 -07001615 return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
Andrea Arcangeli27c73ae2013-11-21 14:32:02 -08001616}
Andrea Arcangeli27c73ae2013-11-21 14:32:02 -08001617
Mike Kravetzc0d03812020-04-01 21:11:05 -07001618/*
Mike Kravetzc0d03812020-04-01 21:11:05 -07001619 * Find and lock address space (mapping) in write mode.
1620 *
Mike Kravetz336bf302020-11-13 22:52:16 -08001621 * Upon entry, the page is locked which means that page_mapping() is
1622 * stable. Due to locking order, we can only trylock_write. If we can
1623	 * cannot get the lock, simply return NULL to the caller.
Mike Kravetzc0d03812020-04-01 21:11:05 -07001624 */
1625struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1626{
Mike Kravetz336bf302020-11-13 22:52:16 -08001627 struct address_space *mapping = page_mapping(hpage);
Mike Kravetzc0d03812020-04-01 21:11:05 -07001628
Mike Kravetzc0d03812020-04-01 21:11:05 -07001629 if (!mapping)
1630 return mapping;
1631
Mike Kravetzc0d03812020-04-01 21:11:05 -07001632 if (i_mmap_trylock_write(mapping))
1633 return mapping;
1634
Mike Kravetz336bf302020-11-13 22:52:16 -08001635 return NULL;
Mike Kravetzc0d03812020-04-01 21:11:05 -07001636}
1637
Hugh Dickins377a7962021-06-24 18:39:52 -07001638pgoff_t hugetlb_basepage_index(struct page *page)
Zhang Yi13d60f42013-06-25 21:19:31 +08001639{
1640 struct page *page_head = compound_head(page);
1641 pgoff_t index = page_index(page_head);
1642 unsigned long compound_idx;
1643
Zhang Yi13d60f42013-06-25 21:19:31 +08001644 if (compound_order(page_head) >= MAX_ORDER)
1645 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1646 else
1647 compound_idx = page - page_head;
1648
1649 return (index << compound_order(page_head)) + compound_idx;
1650}
1651
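/*
 * Worked example (assuming 4K base pages and a 2MB huge page, i.e. order 9):
 * a huge page at file index 3 starts at basepage index 3 << 9 = 1536, so the
 * tail page 5 base pages past the head maps to 1536 + 5 = 1541.
 */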
Michal Hocko0c397da2018-01-31 16:20:56 -08001652static struct page *alloc_buddy_huge_page(struct hstate *h,
Mike Kravetzf60858f2019-09-23 15:37:35 -07001653 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1654 nodemask_t *node_alloc_noretry)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655{
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001656 int order = huge_page_order(h);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 struct page *page;
Mike Kravetzf60858f2019-09-23 15:37:35 -07001658 bool alloc_try_hard = true;
Joe Jinf96efd52007-07-15 23:38:12 -07001659
Mike Kravetzf60858f2019-09-23 15:37:35 -07001660 /*
1661 * By default we always try hard to allocate the page with
1662 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
1663 * a loop (to adjust global huge page counts) and previous allocation
1664 * failed, do not continue to try hard on the same node. Use the
1665 * node_alloc_noretry bitmap to manage this state information.
1666 */
1667 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1668 alloc_try_hard = false;
1669 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1670 if (alloc_try_hard)
1671 gfp_mask |= __GFP_RETRY_MAYFAIL;
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001672 if (nid == NUMA_NO_NODE)
1673 nid = numa_mem_id();
1674 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1675 if (page)
1676 __count_vm_event(HTLB_BUDDY_PGALLOC);
1677 else
1678 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
Nishanth Aravamudan63b46132007-10-16 01:26:24 -07001679
Mike Kravetzf60858f2019-09-23 15:37:35 -07001680 /*
1681	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1682 * indicates an overall state change. Clear bit so that we resume
1683 * normal 'try hard' allocations.
1684 */
1685 if (node_alloc_noretry && page && !alloc_try_hard)
1686 node_clear(nid, *node_alloc_noretry);
1687
1688 /*
1689 * If we tried hard to get a page but failed, set bit so that
1690 * subsequent attempts will not try as hard until there is an
1691 * overall state change.
1692 */
1693 if (node_alloc_noretry && !page && alloc_try_hard)
1694 node_set(nid, *node_alloc_noretry);
1695
Nishanth Aravamudan63b46132007-10-16 01:26:24 -07001696 return page;
1697}
1698
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001699/*
Michal Hocko0c397da2018-01-31 16:20:56 -08001700 * Common helper to allocate a fresh hugetlb page. All specific allocators
1701 * should use this function to get new hugetlb pages.
1702 */
1703static struct page *alloc_fresh_huge_page(struct hstate *h,
Mike Kravetzf60858f2019-09-23 15:37:35 -07001704 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1705 nodemask_t *node_alloc_noretry)
Michal Hocko0c397da2018-01-31 16:20:56 -08001706{
1707 struct page *page;
1708
1709 if (hstate_is_gigantic(h))
1710 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1711 else
1712 page = alloc_buddy_huge_page(h, gfp_mask,
Mike Kravetzf60858f2019-09-23 15:37:35 -07001713 nid, nmask, node_alloc_noretry);
Michal Hocko0c397da2018-01-31 16:20:56 -08001714 if (!page)
1715 return NULL;
1716
1717 if (hstate_is_gigantic(h))
1718 prep_compound_gigantic_page(page, huge_page_order(h));
1719 prep_new_huge_page(h, page, page_to_nid(page));
1720
1721 return page;
1722}
1723
1724/*
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001725 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1726 * manner.
1727 */
Mike Kravetzf60858f2019-09-23 15:37:35 -07001728static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1729 nodemask_t *node_alloc_noretry)
Joonsoo Kimb2261022013-09-11 14:21:00 -07001730{
1731 struct page *page;
1732 int nr_nodes, node;
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001733 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
Joonsoo Kimb2261022013-09-11 14:21:00 -07001734
1735 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
Mike Kravetzf60858f2019-09-23 15:37:35 -07001736 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1737 node_alloc_noretry);
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001738 if (page)
Joonsoo Kimb2261022013-09-11 14:21:00 -07001739 break;
Joonsoo Kimb2261022013-09-11 14:21:00 -07001740 }
1741
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001742 if (!page)
1743 return 0;
Joonsoo Kimb2261022013-09-11 14:21:00 -07001744
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08001745 put_page(page); /* free it into the hugepage allocator */
1746
1747 return 1;
Joonsoo Kimb2261022013-09-11 14:21:00 -07001748}
1749
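/*
 * alloc_pool_huge_page() adds at most one page per call and returns 1 on
 * success, 0 on failure. The put_page() above drops the only reference on
 * the freshly prepped page, so free_huge_page() places it on the hstate
 * free list.
 */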
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001750/*
1751 * Free huge page from pool from next node to free.
1752 * Attempt to keep persistent huge pages more or less
1753 * balanced over allowed nodes.
1754 * Called with hugetlb_lock locked.
1755 */
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08001756static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1757 bool acct_surplus)
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001758{
Joonsoo Kimb2261022013-09-11 14:21:00 -07001759 int nr_nodes, node;
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001760 int ret = 0;
1761
Joonsoo Kimb2261022013-09-11 14:21:00 -07001762 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
Lee Schermerhorn685f3452009-09-21 17:01:23 -07001763 /*
1764 * If we're returning unused surplus pages, only examine
1765 * nodes with surplus pages.
1766 */
Joonsoo Kimb2261022013-09-11 14:21:00 -07001767 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1768 !list_empty(&h->hugepage_freelists[node])) {
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001769 struct page *page =
Joonsoo Kimb2261022013-09-11 14:21:00 -07001770 list_entry(h->hugepage_freelists[node].next,
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001771 struct page, lru);
1772 list_del(&page->lru);
1773 h->free_huge_pages--;
Joonsoo Kimb2261022013-09-11 14:21:00 -07001774 h->free_huge_pages_node[node]--;
Lee Schermerhorn685f3452009-09-21 17:01:23 -07001775 if (acct_surplus) {
1776 h->surplus_huge_pages--;
Joonsoo Kimb2261022013-09-11 14:21:00 -07001777 h->surplus_huge_pages_node[node]--;
Lee Schermerhorn685f3452009-09-21 17:01:23 -07001778 }
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001779 update_and_free_page(h, page);
1780 ret = 1;
Lee Schermerhorn9a76db02009-12-14 17:58:15 -08001781 break;
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001782 }
Joonsoo Kimb2261022013-09-11 14:21:00 -07001783 }
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07001784
1785 return ret;
1786}
1787
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001788/*
1789 * Dissolve a given free hugepage into free buddy pages. This function does
Naoya Horiguchifaf53de2019-06-28 12:06:56 -07001790 * nothing for in-use hugepages and non-hugepages.
1791 * This function returns one of the following values:
1792 *
1793 *  -EBUSY: failed to dissolve free hugepages or the hugepage is in use
1794 *	   (allocated or reserved).
1795 * 0: successfully dissolved free hugepages or the page is not a
1796 * hugepage (considered as already dissolved)
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001797 */
Anshuman Khandualc3114a82017-07-10 15:47:41 -07001798int dissolve_free_huge_page(struct page *page)
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001799{
Naoya Horiguchi6bc9b562018-08-23 17:00:38 -07001800 int rc = -EBUSY;
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001801
Muchun Songe334b1f2021-02-04 18:32:06 -08001802retry:
Naoya Horiguchifaf53de2019-06-28 12:06:56 -07001803 /* Not to disrupt normal path by vainly holding hugetlb_lock */
1804 if (!PageHuge(page))
1805 return 0;
1806
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001807 spin_lock(&hugetlb_lock);
Naoya Horiguchifaf53de2019-06-28 12:06:56 -07001808 if (!PageHuge(page)) {
1809 rc = 0;
1810 goto out;
1811 }
1812
1813 if (!page_count(page)) {
Gerald Schaefer2247bb32016-10-07 17:01:07 -07001814 struct page *head = compound_head(page);
1815 struct hstate *h = page_hstate(head);
1816 int nid = page_to_nid(head);
Naoya Horiguchi6bc9b562018-08-23 17:00:38 -07001817 if (h->free_huge_pages - h->resv_huge_pages == 0)
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001818 goto out;
Muchun Songe334b1f2021-02-04 18:32:06 -08001819
1820 /*
1821 * We should make sure that the page is already on the free list
1822 * when it is dissolved.
1823 */
1824 if (unlikely(!PageHugeFreed(head))) {
1825 spin_unlock(&hugetlb_lock);
1826 cond_resched();
1827
1828 /*
1829 * Theoretically, we should return -EBUSY when we
1830 * encounter this race. In fact, we have a chance
1831			 * to successfully dissolve the page if we do a
1832			 * retry, because the race window is quite small.
1833			 * If we seize this opportunity, it is an optimization
1834			 * for increasing the success rate of dissolving the page.
1835 */
1836 goto retry;
1837 }
1838
Anshuman Khandualc3114a82017-07-10 15:47:41 -07001839 /*
1840 * Move PageHWPoison flag from head page to the raw error page,
1841 * which makes any subpages rather than the error page reusable.
1842 */
1843 if (PageHWPoison(head) && page != head) {
1844 SetPageHWPoison(page);
1845 ClearPageHWPoison(head);
1846 }
Gerald Schaefer2247bb32016-10-07 17:01:07 -07001847 list_del(&head->lru);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001848 h->free_huge_pages--;
1849 h->free_huge_pages_node[nid]--;
zhong jiangc1470b32016-08-11 15:32:55 -07001850 h->max_huge_pages--;
Gerald Schaefer2247bb32016-10-07 17:01:07 -07001851 update_and_free_page(h, head);
Naoya Horiguchi6bc9b562018-08-23 17:00:38 -07001852 rc = 0;
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001853 }
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001854out:
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001855 spin_unlock(&hugetlb_lock);
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001856 return rc;
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001857}
1858
1859/*
1860 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1861 * make specified memory blocks removable from the system.
Gerald Schaefer2247bb32016-10-07 17:01:07 -07001862 * Note that this will dissolve a free gigantic hugepage completely, if any
1863 * part of it lies within the given range.
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001864 * Also note that if dissolve_free_huge_page() returns with an error, all
1865 * free hugepages that were dissolved before that error are lost.
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001866 */
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001867int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001868{
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001869 unsigned long pfn;
Gerald Schaefereb03aa02016-10-07 17:01:13 -07001870 struct page *page;
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001871 int rc = 0;
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001872
Li Zhongd0177632014-08-06 16:07:56 -07001873 if (!hugepages_supported())
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001874 return rc;
Li Zhongd0177632014-08-06 16:07:56 -07001875
Gerald Schaefereb03aa02016-10-07 17:01:13 -07001876 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1877 page = pfn_to_page(pfn);
Naoya Horiguchifaf53de2019-06-28 12:06:56 -07001878 rc = dissolve_free_huge_page(page);
1879 if (rc)
1880 break;
Gerald Schaefereb03aa02016-10-07 17:01:13 -07001881 }
Gerald Schaefer082d5b62016-10-07 17:01:10 -07001882
1883 return rc;
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07001884}
1885
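/*
 * The loop above steps through the range in units of the smallest configured
 * huge page (1 << minimum_order pfns), so every huge page overlapping
 * [start_pfn, end_pfn) is visited at least once; tail pages are handled
 * because dissolve_free_huge_page() looks up the compound head.
 */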
Michal Hockoab5ac902018-01-31 16:20:48 -08001886/*
1887 * Allocates a fresh surplus page from the page allocator.
1888 */
Michal Hocko0c397da2018-01-31 16:20:56 -08001889static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
Michal Hockoaaf14e42017-07-10 15:49:08 -07001890 int nid, nodemask_t *nmask)
Adam Litke7893d1d2007-10-16 01:26:18 -07001891{
Michal Hocko9980d742018-01-31 16:20:52 -08001892 struct page *page = NULL;
Adam Litke7893d1d2007-10-16 01:26:18 -07001893
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07001894 if (hstate_is_gigantic(h))
Andi Kleenaa888a72008-07-23 21:27:47 -07001895 return NULL;
1896
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08001897 spin_lock(&hugetlb_lock);
Michal Hocko9980d742018-01-31 16:20:52 -08001898 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1899 goto out_unlock;
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08001900 spin_unlock(&hugetlb_lock);
1901
Mike Kravetzf60858f2019-09-23 15:37:35 -07001902 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
Michal Hocko9980d742018-01-31 16:20:52 -08001903 if (!page)
Michal Hocko0c397da2018-01-31 16:20:56 -08001904 return NULL;
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08001905
1906 spin_lock(&hugetlb_lock);
Michal Hocko9980d742018-01-31 16:20:52 -08001907 /*
1908 * We could have raced with the pool size change.
1909 * Double check that and simply deallocate the new page
1910	 * if we would end up overcommitting the surpluses. Abuse the
1911	 * temporary page to work around the nasty free_huge_page
1912	 * code flow.
1913 */
1914 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1915 SetPageHugeTemporary(page);
Kai Shen2bf753e2019-05-13 17:15:37 -07001916 spin_unlock(&hugetlb_lock);
Michal Hocko9980d742018-01-31 16:20:52 -08001917 put_page(page);
Kai Shen2bf753e2019-05-13 17:15:37 -07001918 return NULL;
Michal Hocko9980d742018-01-31 16:20:52 -08001919 } else {
Michal Hocko9980d742018-01-31 16:20:52 -08001920 h->surplus_huge_pages++;
Michal Hocko4704dea2018-03-09 15:50:55 -08001921 h->surplus_huge_pages_node[page_to_nid(page)]++;
Adam Litke7893d1d2007-10-16 01:26:18 -07001922 }
Michal Hocko9980d742018-01-31 16:20:52 -08001923
1924out_unlock:
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08001925 spin_unlock(&hugetlb_lock);
Adam Litke7893d1d2007-10-16 01:26:18 -07001926
1927 return page;
1928}
1929
Joonsoo Kimbbe88752020-08-11 18:37:38 -07001930static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
Aneesh Kumar K.V9a4e9f32019-03-05 15:47:44 -08001931 int nid, nodemask_t *nmask)
Michal Hockoab5ac902018-01-31 16:20:48 -08001932{
1933 struct page *page;
1934
1935 if (hstate_is_gigantic(h))
1936 return NULL;
1937
Mike Kravetzf60858f2019-09-23 15:37:35 -07001938 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
Michal Hockoab5ac902018-01-31 16:20:48 -08001939 if (!page)
1940 return NULL;
1941
1942 /*
1943 * We do not account these pages as surplus because they are only
1944	 * temporary and will be released properly on the last reference.
1945 */
Michal Hockoab5ac902018-01-31 16:20:48 -08001946 SetPageHugeTemporary(page);
1947
1948 return page;
1949}
1950
Adam Litkee4e574b2007-10-16 01:26:19 -07001951/*
Dave Hansen099730d2015-11-05 18:50:17 -08001952 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1953 */
Dave Hansene0ec90e2015-11-05 18:50:20 -08001954static
Michal Hocko0c397da2018-01-31 16:20:56 -08001955struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
Dave Hansen099730d2015-11-05 18:50:17 -08001956 struct vm_area_struct *vma, unsigned long addr)
1957{
Michal Hockoaaf14e42017-07-10 15:49:08 -07001958 struct page *page;
1959 struct mempolicy *mpol;
1960 gfp_t gfp_mask = htlb_alloc_mask(h);
1961 int nid;
1962 nodemask_t *nodemask;
1963
1964 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
Michal Hocko0c397da2018-01-31 16:20:56 -08001965 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
Michal Hockoaaf14e42017-07-10 15:49:08 -07001966 mpol_cond_put(mpol);
1967
1968 return page;
Dave Hansen099730d2015-11-05 18:50:17 -08001969}
1970
Michal Hockoab5ac902018-01-31 16:20:48 -08001971/* page migration callback function */
Michal Hocko3e59fcb2017-07-10 15:49:11 -07001972struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
Joonsoo Kimd92bbc22020-08-11 18:37:17 -07001973 nodemask_t *nmask, gfp_t gfp_mask)
Michal Hocko4db9b2e2017-07-10 15:48:44 -07001974{
Michal Hocko4db9b2e2017-07-10 15:48:44 -07001975 spin_lock(&hugetlb_lock);
1976 if (h->free_huge_pages - h->resv_huge_pages > 0) {
Michal Hocko3e59fcb2017-07-10 15:49:11 -07001977 struct page *page;
1978
1979 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1980 if (page) {
1981 spin_unlock(&hugetlb_lock);
1982 return page;
Michal Hocko4db9b2e2017-07-10 15:48:44 -07001983 }
1984 }
1985 spin_unlock(&hugetlb_lock);
Michal Hocko4db9b2e2017-07-10 15:48:44 -07001986
Michal Hocko0c397da2018-01-31 16:20:56 -08001987 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
Michal Hocko4db9b2e2017-07-10 15:48:44 -07001988}
1989
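/*
 * alloc_huge_page_nodemask() first tries to take an existing page from the
 * free pool without touching the reserved pages; only if none is available
 * does it allocate a fresh temporary page via alloc_migrate_huge_page(),
 * which will be freed outright once the migration reference is dropped.
 */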
Michal Hockoebd63722018-01-31 16:21:00 -08001990/* mempolicy aware migration callback */
Michal Hocko389c8172018-01-31 16:21:03 -08001991struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1992 unsigned long address)
Michal Hockoebd63722018-01-31 16:21:00 -08001993{
1994 struct mempolicy *mpol;
1995 nodemask_t *nodemask;
1996 struct page *page;
Michal Hockoebd63722018-01-31 16:21:00 -08001997 gfp_t gfp_mask;
1998 int node;
1999
Michal Hockoebd63722018-01-31 16:21:00 -08002000 gfp_mask = htlb_alloc_mask(h);
2001 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
Joonsoo Kimd92bbc22020-08-11 18:37:17 -07002002 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
Michal Hockoebd63722018-01-31 16:21:00 -08002003 mpol_cond_put(mpol);
2004
2005 return page;
2006}
2007
Naoya Horiguchibf50bab2010-09-08 10:19:33 +09002008/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002009 * Increase the hugetlb pool such that it can accommodate a reservation
Adam Litkee4e574b2007-10-16 01:26:19 -07002010 * of size 'delta'.
2011 */
Andi Kleena5516432008-07-23 21:27:41 -07002012static int gather_surplus_pages(struct hstate *h, int delta)
Jules Irenge1b2a1e72020-04-06 20:08:09 -07002013 __must_hold(&hugetlb_lock)
Adam Litkee4e574b2007-10-16 01:26:19 -07002014{
2015 struct list_head surplus_list;
2016 struct page *page, *tmp;
2017 int ret, i;
2018 int needed, allocated;
Hillf Danton28073b02012-03-21 16:34:00 -07002019 bool alloc_ok = true;
Adam Litkee4e574b2007-10-16 01:26:19 -07002020
Andi Kleena5516432008-07-23 21:27:41 -07002021 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
Adam Litkeac09b3a2008-03-04 14:29:38 -08002022 if (needed <= 0) {
Andi Kleena5516432008-07-23 21:27:41 -07002023 h->resv_huge_pages += delta;
Adam Litkee4e574b2007-10-16 01:26:19 -07002024 return 0;
Adam Litkeac09b3a2008-03-04 14:29:38 -08002025 }
Adam Litkee4e574b2007-10-16 01:26:19 -07002026
2027 allocated = 0;
2028 INIT_LIST_HEAD(&surplus_list);
2029
2030 ret = -ENOMEM;
2031retry:
2032 spin_unlock(&hugetlb_lock);
2033 for (i = 0; i < needed; i++) {
Michal Hocko0c397da2018-01-31 16:20:56 -08002034 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
Michal Hockoaaf14e42017-07-10 15:49:08 -07002035 NUMA_NO_NODE, NULL);
Hillf Danton28073b02012-03-21 16:34:00 -07002036 if (!page) {
2037 alloc_ok = false;
2038 break;
2039 }
Adam Litkee4e574b2007-10-16 01:26:19 -07002040 list_add(&page->lru, &surplus_list);
David Rientjes69ed7792017-07-10 15:48:50 -07002041 cond_resched();
Adam Litkee4e574b2007-10-16 01:26:19 -07002042 }
Hillf Danton28073b02012-03-21 16:34:00 -07002043 allocated += i;
Adam Litkee4e574b2007-10-16 01:26:19 -07002044
2045 /*
2046 * After retaking hugetlb_lock, we need to recalculate 'needed'
2047 * because either resv_huge_pages or free_huge_pages may have changed.
2048 */
2049 spin_lock(&hugetlb_lock);
Andi Kleena5516432008-07-23 21:27:41 -07002050 needed = (h->resv_huge_pages + delta) -
2051 (h->free_huge_pages + allocated);
Hillf Danton28073b02012-03-21 16:34:00 -07002052 if (needed > 0) {
2053 if (alloc_ok)
2054 goto retry;
2055 /*
2056 * We were not able to allocate enough pages to
2057 * satisfy the entire reservation so we free what
2058 * we've allocated so far.
2059 */
2060 goto free;
2061 }
Adam Litkee4e574b2007-10-16 01:26:19 -07002062 /*
2063 * The surplus_list now contains _at_least_ the number of extra pages
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002064 * needed to accommodate the reservation. Add the appropriate number
Adam Litkee4e574b2007-10-16 01:26:19 -07002065 * of pages to the hugetlb pool and free the extras back to the buddy
Adam Litkeac09b3a2008-03-04 14:29:38 -08002066 * allocator. Commit the entire reservation here to prevent another
2067 * process from stealing the pages as they are added to the pool but
2068 * before they are reserved.
Adam Litkee4e574b2007-10-16 01:26:19 -07002069 */
2070 needed += allocated;
Andi Kleena5516432008-07-23 21:27:41 -07002071 h->resv_huge_pages += delta;
Adam Litkee4e574b2007-10-16 01:26:19 -07002072 ret = 0;
Naoya Horiguchia9869b82010-09-08 10:19:37 +09002073
Adam Litke19fc3f02008-04-28 02:12:20 -07002074 /* Free the needed pages to the hugetlb pool */
Adam Litkee4e574b2007-10-16 01:26:19 -07002075 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
Adam Litke19fc3f02008-04-28 02:12:20 -07002076 if ((--needed) < 0)
2077 break;
Naoya Horiguchia9869b82010-09-08 10:19:37 +09002078 /*
2079 * This page is now managed by the hugetlb allocator and has
2080 * no users -- drop the buddy allocator's reference.
2081 */
2082 put_page_testzero(page);
Sasha Levin309381fea2014-01-23 15:52:54 -08002083 VM_BUG_ON_PAGE(page_count(page), page);
Andi Kleena5516432008-07-23 21:27:41 -07002084 enqueue_huge_page(h, page);
Adam Litke19fc3f02008-04-28 02:12:20 -07002085 }
Hillf Danton28073b02012-03-21 16:34:00 -07002086free:
Hillf Dantonb0365c82011-12-28 15:57:16 -08002087 spin_unlock(&hugetlb_lock);
Adam Litke19fc3f02008-04-28 02:12:20 -07002088
2089 /* Free unnecessary surplus pages to the buddy allocator */
Joonsoo Kimc0d934b2013-09-11 14:21:02 -07002090 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2091 put_page(page);
Naoya Horiguchia9869b82010-09-08 10:19:37 +09002092 spin_lock(&hugetlb_lock);
Adam Litkee4e574b2007-10-16 01:26:19 -07002093
2094 return ret;
2095}
2096
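/*
 * gather_surplus_pages() drops hugetlb_lock while it calls into the page
 * allocator and retakes it afterwards, so 'needed' is recomputed on every
 * retry: resv_huge_pages or free_huge_pages may have changed in the
 * meantime. Pages that turn out to be unnecessary are handed back to the
 * buddy allocator with put_page() after the lock is dropped again.
 */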
2097/*
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002098 * This routine has two main purposes:
2099 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2100 * in unused_resv_pages. This corresponds to the prior adjustments made
2101 * to the associated reservation map.
2102 * 2) Free any unused surplus pages that may have been allocated to satisfy
2103 * the reservation. As many as unused_resv_pages may be freed.
2104 *
2105 * Called with hugetlb_lock held. However, the lock could be dropped (and
2106 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
2107 * we must make sure nobody else can claim pages we are in the process of
2108 * freeing. Do this by ensuring resv_huge_pages is always greater than the
2109 * number of huge pages we plan to free when dropping the lock.
Adam Litkee4e574b2007-10-16 01:26:19 -07002110 */
Andi Kleena5516432008-07-23 21:27:41 -07002111static void return_unused_surplus_pages(struct hstate *h,
2112 unsigned long unused_resv_pages)
Adam Litkee4e574b2007-10-16 01:26:19 -07002113{
Adam Litkee4e574b2007-10-16 01:26:19 -07002114 unsigned long nr_pages;
2115
Andi Kleenaa888a72008-07-23 21:27:47 -07002116 /* Cannot return gigantic pages currently */
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07002117 if (hstate_is_gigantic(h))
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002118 goto out;
Andi Kleenaa888a72008-07-23 21:27:47 -07002119
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002120 /*
2121 * Part (or even all) of the reservation could have been backed
2122 * by pre-allocated pages. Only free surplus pages.
2123 */
Andi Kleena5516432008-07-23 21:27:41 -07002124 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
Adam Litkee4e574b2007-10-16 01:26:19 -07002125
Lee Schermerhorn685f3452009-09-21 17:01:23 -07002126 /*
2127 * We want to release as many surplus pages as possible, spread
Lee Schermerhorn9b5e5d02009-12-14 17:58:32 -08002128 * evenly across all nodes with memory. Iterate across these nodes
2129 * until we can no longer free unreserved surplus pages. This occurs
2130 * when the nodes with surplus pages have no free pages.
Randy Dunlap9e7ee402020-08-11 18:32:59 -07002131 * free_pool_huge_page() will balance the freed pages across the
Lee Schermerhorn9b5e5d02009-12-14 17:58:32 -08002132 * on-line nodes with memory and will handle the hstate accounting.
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002133 *
2134 * Note that we decrement resv_huge_pages as we free the pages. If
2135 * we drop the lock, resv_huge_pages will still be sufficiently large
2136 * to cover subsequent pages we may free.
Lee Schermerhorn685f3452009-09-21 17:01:23 -07002137 */
2138 while (nr_pages--) {
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002139 h->resv_huge_pages--;
2140 unused_resv_pages--;
Lai Jiangshan8cebfcd2012-12-12 13:51:36 -08002141 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002142 goto out;
Mizuma, Masayoshi7848a4b2014-04-18 15:07:18 -07002143 cond_resched_lock(&hugetlb_lock);
Adam Litkee4e574b2007-10-16 01:26:19 -07002144 }
Mike Kravetze5bbc8a2017-01-10 16:58:27 -08002145
2146out:
2147 /* Fully uncommit the reservation */
2148 h->resv_huge_pages -= unused_resv_pages;
Adam Litkee4e574b2007-10-16 01:26:19 -07002149}
2150
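/*
 * Example: releasing a reservation of 4 pages when only 1 surplus page
 * exists gives nr_pages = min(4, 1) = 1, so a single surplus page is freed
 * (decrementing resv_huge_pages along the way); the remaining 3 reservations
 * were backed by pre-allocated pool pages and are simply uncommitted by the
 * final "h->resv_huge_pages -= unused_resv_pages".
 */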
Mike Kravetz5e911372015-09-08 15:01:28 -07002151
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002152/*
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002153 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
Mike Kravetz5e911372015-09-08 15:01:28 -07002154 * are used by the huge page allocation routines to manage reservations.
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002155 *
2156 * vma_needs_reservation is called to determine if the huge page at addr
2157 * within the vma has an associated reservation. If a reservation is
2158 * needed, the value 1 is returned. The caller is then responsible for
2159 * managing the global reservation and subpool usage counts. After
2160 * the huge page has been allocated, vma_commit_reservation is called
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002161 * to add the page to the reservation map. If the page allocation fails,
2162 * the reservation must be ended instead of committed. vma_end_reservation
2163 * is called in such cases.
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002164 *
2165 * In the normal case, vma_commit_reservation returns the same value
2166 * as the preceding vma_needs_reservation call. The only time this
2167 * is not the case is if a reserve map was changed between calls. It
2168 * is the responsibility of the caller to notice the difference and
2169 * take appropriate action.
Mike Kravetz96b96a92016-11-10 10:46:32 -08002170 *
2171 * vma_add_reservation is used in error paths where a reservation must
2172 * be restored when a newly allocated huge page must be freed. It is
2173 * to be called after calling vma_needs_reservation to determine if a
2174 * reservation exists.
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002175 */
Mike Kravetz5e911372015-09-08 15:01:28 -07002176enum vma_resv_mode {
2177 VMA_NEEDS_RESV,
2178 VMA_COMMIT_RESV,
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002179 VMA_END_RESV,
Mike Kravetz96b96a92016-11-10 10:46:32 -08002180 VMA_ADD_RESV,
Mike Kravetz5e911372015-09-08 15:01:28 -07002181};
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002182static long __vma_reservation_common(struct hstate *h,
2183 struct vm_area_struct *vma, unsigned long addr,
Mike Kravetz5e911372015-09-08 15:01:28 -07002184 enum vma_resv_mode mode)
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002185{
Joonsoo Kim4e35f482014-04-03 14:47:30 -07002186 struct resv_map *resv;
2187 pgoff_t idx;
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002188 long ret;
Mina Almasry0db9d742020-04-01 21:11:25 -07002189 long dummy_out_regions_needed;
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002190
Joonsoo Kim4e35f482014-04-03 14:47:30 -07002191 resv = vma_resv_map(vma);
2192 if (!resv)
Andy Whitcroft84afd992008-07-23 21:27:32 -07002193 return 1;
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002194
Joonsoo Kim4e35f482014-04-03 14:47:30 -07002195 idx = vma_hugecache_offset(h, vma, addr);
Mike Kravetz5e911372015-09-08 15:01:28 -07002196 switch (mode) {
2197 case VMA_NEEDS_RESV:
Mina Almasry0db9d742020-04-01 21:11:25 -07002198 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2199 /* We assume that vma_reservation_* routines always operate on
2200		 * 1 page, and that adding a 1 page entry to the resv map can only
2201 * ever require 1 region.
2202 */
2203 VM_BUG_ON(dummy_out_regions_needed != 1);
Mike Kravetz5e911372015-09-08 15:01:28 -07002204 break;
2205 case VMA_COMMIT_RESV:
Mina Almasry075a61d2020-04-01 21:11:28 -07002206 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
Mina Almasry0db9d742020-04-01 21:11:25 -07002207 /* region_add calls of range 1 should never fail. */
2208 VM_BUG_ON(ret < 0);
Mike Kravetz5e911372015-09-08 15:01:28 -07002209 break;
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002210 case VMA_END_RESV:
Mina Almasry0db9d742020-04-01 21:11:25 -07002211 region_abort(resv, idx, idx + 1, 1);
Mike Kravetz5e911372015-09-08 15:01:28 -07002212 ret = 0;
2213 break;
Mike Kravetz96b96a92016-11-10 10:46:32 -08002214 case VMA_ADD_RESV:
Mina Almasry0db9d742020-04-01 21:11:25 -07002215 if (vma->vm_flags & VM_MAYSHARE) {
Mina Almasry075a61d2020-04-01 21:11:28 -07002216 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
Mina Almasry0db9d742020-04-01 21:11:25 -07002217 /* region_add calls of range 1 should never fail. */
2218 VM_BUG_ON(ret < 0);
2219 } else {
2220 region_abort(resv, idx, idx + 1, 1);
Mike Kravetz96b96a92016-11-10 10:46:32 -08002221 ret = region_del(resv, idx, idx + 1);
2222 }
2223 break;
Mike Kravetz5e911372015-09-08 15:01:28 -07002224 default:
2225 BUG();
2226 }
Andy Whitcroft84afd992008-07-23 21:27:32 -07002227
Joonsoo Kim4e35f482014-04-03 14:47:30 -07002228 if (vma->vm_flags & VM_MAYSHARE)
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002229 return ret;
Mike Kravetz67961f92016-06-08 15:33:42 -07002230 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
2231 /*
2232 * In most cases, reserves always exist for private mappings.
2233		 * However, a file associated with the mapping could have been
2234		 * hole punched or truncated after reserves were consumed.
2235		 * A subsequent fault on such a range will not use reserves.
2236		 * Subtle - The reserve map for private mappings has the
2237		 * opposite meaning from that of shared mappings.  If NO
2238 * entry is in the reserve map, it means a reservation exists.
2239 * If an entry exists in the reserve map, it means the
2240 * reservation has already been consumed. As a result, the
2241 * return value of this routine is the opposite of the
2242 * value returned from reserve map manipulation routines above.
2243 */
2244 if (ret)
2245 return 0;
2246 else
2247 return 1;
2248 }
Joonsoo Kim4e35f482014-04-03 14:47:30 -07002249 else
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002250 return ret < 0 ? ret : 0;
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002251}
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002252
2253static long vma_needs_reservation(struct hstate *h,
Andi Kleena5516432008-07-23 21:27:41 -07002254 struct vm_area_struct *vma, unsigned long addr)
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002255{
Mike Kravetz5e911372015-09-08 15:01:28 -07002256 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002257}
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002258
Mike Kravetzcf3ad202015-06-24 16:57:55 -07002259static long vma_commit_reservation(struct hstate *h,
2260 struct vm_area_struct *vma, unsigned long addr)
2261{
Mike Kravetz5e911372015-09-08 15:01:28 -07002262 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2263}
2264
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002265static void vma_end_reservation(struct hstate *h,
Mike Kravetz5e911372015-09-08 15:01:28 -07002266 struct vm_area_struct *vma, unsigned long addr)
2267{
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002268 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002269}
2270
Mike Kravetz96b96a92016-11-10 10:46:32 -08002271static long vma_add_reservation(struct hstate *h,
2272 struct vm_area_struct *vma, unsigned long addr)
2273{
2274 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2275}
2276
2277/*
2278 * This routine is called to restore a reservation on error paths. In the
2279 * specific error paths, a huge page was allocated (via alloc_huge_page)
2280 * and is about to be freed. If a reservation for the page existed,
2281 * alloc_huge_page would have consumed the reservation and set PagePrivate
2282 * in the newly allocated page. When the page is freed via free_huge_page,
2283 * the global reservation count will be incremented if PagePrivate is set.
2284 * However, free_huge_page cannot adjust the reserve map. Adjust the
2285 * reserve map here to be consistent with global reserve count adjustments
2286 * to be made by free_huge_page.
2287 */
2288static void restore_reserve_on_error(struct hstate *h,
2289 struct vm_area_struct *vma, unsigned long address,
2290 struct page *page)
2291{
2292 if (unlikely(PagePrivate(page))) {
2293 long rc = vma_needs_reservation(h, vma, address);
2294
2295 if (unlikely(rc < 0)) {
2296 /*
2297 * Rare out of memory condition in reserve map
2298 * manipulation. Clear PagePrivate so that
2299 * global reserve count will not be incremented
2300 * by free_huge_page. This will make it appear
2301 * as though the reservation for this page was
2302 * consumed. This may prevent the task from
2303 * faulting in the page at a later time. This
2304 * is better than inconsistent global huge page
2305 * accounting of reserve counts.
2306 */
2307 ClearPagePrivate(page);
2308 } else if (rc) {
2309 rc = vma_add_reservation(h, vma, address);
2310 if (unlikely(rc < 0))
2311 /*
2312 * See above comment about rare out of
2313 * memory condition.
2314 */
2315 ClearPagePrivate(page);
2316 } else
2317 vma_end_reservation(h, vma, address);
2318 }
2319}
2320
Mike Kravetz70c35472015-09-08 15:01:54 -07002321struct page *alloc_huge_page(struct vm_area_struct *vma,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07002322 unsigned long addr, int avoid_reserve)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323{
David Gibson90481622012-03-21 16:34:12 -07002324 struct hugepage_subpool *spool = subpool_vma(vma);
Andi Kleena5516432008-07-23 21:27:41 -07002325 struct hstate *h = hstate_vma(vma);
Adam Litke348ea202007-11-14 16:59:37 -08002326 struct page *page;
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002327 long map_chg, map_commit;
2328 long gbl_chg;
Aneesh Kumar K.V6d76dcf2012-07-31 16:42:18 -07002329 int ret, idx;
2330 struct hugetlb_cgroup *h_cg;
Mina Almasry08cf9fa2020-04-01 21:11:31 -07002331 bool deferred_reserve;
Adam Litke2fc39ce2007-11-14 16:59:39 -08002332
Aneesh Kumar K.V6d76dcf2012-07-31 16:42:18 -07002333 idx = hstate_index(h);
Mel Gormana1e78772008-07-23 21:27:23 -07002334 /*
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002335 * Examine the region/reserve map to determine if the process
2336 * has a reservation for the page to be allocated. A return
2337 * code of zero indicates a reservation exists (no change).
Mel Gormana1e78772008-07-23 21:27:23 -07002338 */
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002339 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2340 if (map_chg < 0)
Aneesh Kumar K.V76dcee72012-07-31 16:41:57 -07002341 return ERR_PTR(-ENOMEM);
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002342
2343 /*
2344 * Processes that did not create the mapping will have no
2345 * reserves as indicated by the region/reserve map. Check
2346 * that the allocation will not exceed the subpool limit.
2347 * Allocations for MAP_NORESERVE mappings also need to be
2348 * checked against any subpool limit.
2349 */
2350 if (map_chg || avoid_reserve) {
2351 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2352 if (gbl_chg < 0) {
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002353 vma_end_reservation(h, vma, addr);
Aneesh Kumar K.V76dcee72012-07-31 16:41:57 -07002354 return ERR_PTR(-ENOSPC);
Mike Kravetz5e911372015-09-08 15:01:28 -07002355 }
Mel Gormana1e78772008-07-23 21:27:23 -07002356
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002357 /*
2358 * Even though there was no reservation in the region/reserve
2359 * map, there could be reservations associated with the
2360 * subpool that can be used. This would be indicated if the
2361 * return value of hugepage_subpool_get_pages() is zero.
2362 * However, if avoid_reserve is specified we still avoid even
2363 * the subpool reservations.
2364 */
2365 if (avoid_reserve)
2366 gbl_chg = 1;
2367 }
2368
Mina Almasry08cf9fa2020-04-01 21:11:31 -07002369 /* If this allocation is not consuming a reservation, charge it now.
2370 */
2371 deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
2372 if (deferred_reserve) {
2373 ret = hugetlb_cgroup_charge_cgroup_rsvd(
2374 idx, pages_per_huge_page(h), &h_cg);
2375 if (ret)
2376 goto out_subpool_put;
2377 }
2378
Aneesh Kumar K.V6d76dcf2012-07-31 16:42:18 -07002379 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002380 if (ret)
Mina Almasry08cf9fa2020-04-01 21:11:31 -07002381 goto out_uncharge_cgroup_reservation;
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002382
Mel Gormana1e78772008-07-23 21:27:23 -07002383 spin_lock(&hugetlb_lock);
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002384 /*
2385	 * gbl_chg is passed to indicate whether or not a page must be taken
2386 * from the global free pool (global change). gbl_chg == 0 indicates
2387 * a reservation exists for the allocation.
2388 */
2389 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
Joonsoo Kim81a6fca2013-09-11 14:20:58 -07002390 if (!page) {
Aneesh Kumar K.V94ae8ba2012-07-31 16:42:35 -07002391 spin_unlock(&hugetlb_lock);
Michal Hocko0c397da2018-01-31 16:20:56 -08002392 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002393 if (!page)
2394 goto out_uncharge_cgroup;
Naoya Horiguchia88c7692015-12-11 13:40:24 -08002395 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2396 SetPagePrivate(page);
2397 h->resv_huge_pages--;
2398 }
Aneesh Kumar K.V79dbb232012-07-31 16:42:32 -07002399 spin_lock(&hugetlb_lock);
Wei Yang15a8d682020-10-13 16:56:33 -07002400 list_add(&page->lru, &h->hugepage_activelist);
Joonsoo Kim81a6fca2013-09-11 14:20:58 -07002401 /* Fall through */
Mel Gormana1e78772008-07-23 21:27:23 -07002402 }
Joonsoo Kim81a6fca2013-09-11 14:20:58 -07002403 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
Mina Almasry08cf9fa2020-04-01 21:11:31 -07002404 /* If allocation is not consuming a reservation, also store the
2405 * hugetlb_cgroup pointer on the page.
2406 */
2407 if (deferred_reserve) {
2408 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2409 h_cg, page);
2410 }
2411
Joonsoo Kim81a6fca2013-09-11 14:20:58 -07002412 spin_unlock(&hugetlb_lock);
Mel Gormana1e78772008-07-23 21:27:23 -07002413
David Gibson90481622012-03-21 16:34:12 -07002414 set_page_private(page, (unsigned long)spool);
Mel Gormana1e78772008-07-23 21:27:23 -07002415
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002416 map_commit = vma_commit_reservation(h, vma, addr);
2417 if (unlikely(map_chg > map_commit)) {
Mike Kravetz33039672015-06-24 16:57:58 -07002418 /*
2419 * The page was added to the reservation map between
2420 * vma_needs_reservation and vma_commit_reservation.
2421 * This indicates a race with hugetlb_reserve_pages.
2422 * Adjust for the subpool count incremented above AND
2423 * in hugetlb_reserve_pages for the same page. Also,
2424 * the reservation count added in hugetlb_reserve_pages
2425 * no longer applies.
2426 */
2427 long rsv_adjust;
2428
2429 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2430 hugetlb_acct_memory(h, -rsv_adjust);
Mike Kravetz79aa9252020-11-01 17:07:27 -08002431 if (deferred_reserve)
2432 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2433 pages_per_huge_page(h), page);
Mike Kravetz33039672015-06-24 16:57:58 -07002434 }
Adam Litke90d8b7e2007-11-14 16:59:42 -08002435 return page;
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002436
2437out_uncharge_cgroup:
2438 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
Mina Almasry08cf9fa2020-04-01 21:11:31 -07002439out_uncharge_cgroup_reservation:
2440 if (deferred_reserve)
2441 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2442 h_cg);
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002443out_subpool_put:
Mike Kravetzd85f69b2015-09-08 15:01:47 -07002444 if (map_chg || avoid_reserve)
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002445 hugepage_subpool_put_pages(spool, 1);
Mike Kravetzfeba16e2015-09-08 15:01:31 -07002446 vma_end_reservation(h, vma, addr);
Jianyu Zhan8f34af62014-06-04 16:10:36 -07002447 return ERR_PTR(-ENOSPC);
David Gibsonb45b5bd2006-03-22 00:08:55 -08002448}
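
/*
 * Descriptive note (added for readability, not a behavioural change):
 * the error labels above fall through one another, so a failure at a
 * later setup step releases everything acquired before it --
 * out_uncharge_cgroup drops the regular hugetlb_cgroup charge,
 * out_uncharge_cgroup_reservation drops the reservation charge taken
 * when deferred_reserve was set, and out_subpool_put returns the page
 * reserved from the subpool before vma_end_reservation() closes out
 * the reservation map entry.
 */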
2449
Aneesh Kumar K.Ve24a1302017-07-28 10:31:25 +05302450int alloc_bootmem_huge_page(struct hstate *h)
2451 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2452int __alloc_bootmem_huge_page(struct hstate *h)
Andi Kleenaa888a72008-07-23 21:27:47 -07002453{
2454 struct huge_bootmem_page *m;
Joonsoo Kimb2261022013-09-11 14:21:00 -07002455 int nr_nodes, node;
Andi Kleenaa888a72008-07-23 21:27:47 -07002456
Joonsoo Kimb2261022013-09-11 14:21:00 -07002457 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
Andi Kleenaa888a72008-07-23 21:27:47 -07002458 void *addr;
2459
Mike Rapoporteb31d552018-10-30 15:08:04 -07002460 addr = memblock_alloc_try_nid_raw(
Grygorii Strashko8b89a112014-01-21 15:50:36 -08002461 huge_page_size(h), huge_page_size(h),
Mike Rapoport97ad1082018-10-30 15:09:44 -07002462 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
Andi Kleenaa888a72008-07-23 21:27:47 -07002463 if (addr) {
2464 /*
2465 * Use the beginning of the huge page to store the
2466 * huge_bootmem_page struct (until gather_bootmem
2467 * puts them into the mem_map).
2468 */
2469 m = addr;
Cyrill Gorcunov91f47662009-01-06 14:40:33 -08002470 goto found;
Andi Kleenaa888a72008-07-23 21:27:47 -07002471 }
Andi Kleenaa888a72008-07-23 21:27:47 -07002472 }
2473 return 0;
2474
2475found:
Luiz Capitulinodf994ea2014-12-12 16:55:21 -08002476 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
Andi Kleenaa888a72008-07-23 21:27:47 -07002477 /* Put them into a private list first because mem_map is not up yet */
Cannon Matthews330d6e42018-08-17 15:49:17 -07002478 INIT_LIST_HEAD(&m->list);
Andi Kleenaa888a72008-07-23 21:27:47 -07002479 list_add(&m->list, &huge_boot_pages);
2480 m->hstate = h;
2481 return 1;
2482}
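
/*
 * Note (descriptive): pages grabbed here come straight from memblock,
 * before the buddy allocator is up.  At this point they are only
 * threaded onto huge_boot_pages; gather_bootmem_prealloc() below turns
 * them into real hugetlb pages once mem_map exists.
 */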
2483
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08002484static void __init prep_compound_huge_page(struct page *page,
2485 unsigned int order)
Andy Whitcroft18229df2008-11-06 12:53:27 -08002486{
2487 if (unlikely(order > (MAX_ORDER - 1)))
2488 prep_compound_gigantic_page(page, order);
2489 else
2490 prep_compound_page(page, order);
2491}
2492
Andi Kleenaa888a72008-07-23 21:27:47 -07002493/* Put bootmem huge pages into the standard lists after mem_map is up */
2494static void __init gather_bootmem_prealloc(void)
2495{
2496 struct huge_bootmem_page *m;
2497
2498 list_for_each_entry(m, &huge_boot_pages, list) {
Mike Kravetz40d18eb2018-08-17 15:49:07 -07002499 struct page *page = virt_to_page(m);
Andi Kleenaa888a72008-07-23 21:27:47 -07002500 struct hstate *h = m->hstate;
Becky Bruceee8f2482011-07-25 17:11:50 -07002501
Andi Kleenaa888a72008-07-23 21:27:47 -07002502 WARN_ON(page_count(page) != 1);
Andy Whitcroft18229df2008-11-06 12:53:27 -08002503 prep_compound_huge_page(page, h->order);
Andrea Arcangelief5a22b2013-10-16 13:46:56 -07002504 WARN_ON(PageReserved(page));
Andi Kleenaa888a72008-07-23 21:27:47 -07002505 prep_new_huge_page(h, page, page_to_nid(page));
Michal Hockoaf0fb9d2018-01-31 16:20:41 -08002506 put_page(page); /* free it into the hugepage allocator */
2507
Rafael Aquinib0320c72011-06-15 15:08:39 -07002508 /*
2509 * If we had gigantic hugepages allocated at boot time, we need
2510 * to restore the 'stolen' pages to totalram_pages in order to
2511		 * fix confusing memory reports from free(1) and other
2512 * side-effects, like CommitLimit going negative.
2513 */
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07002514 if (hstate_is_gigantic(h))
Jiang Liu3dcc0572013-07-03 15:03:21 -07002515 adjust_managed_page_count(page, 1 << h->order);
Cannon Matthews520495f2018-07-03 17:02:43 -07002516 cond_resched();
Andi Kleenaa888a72008-07-23 21:27:47 -07002517 }
2518}
2519
Andi Kleen8faa8b02008-07-23 21:27:48 -07002520static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521{
2522 unsigned long i;
Mike Kravetzf60858f2019-09-23 15:37:35 -07002523 nodemask_t *node_alloc_noretry;
2524
2525 if (!hstate_is_gigantic(h)) {
2526 /*
2527 * Bit mask controlling how hard we retry per-node allocations.
2528 * Ignore errors as lower level routines can deal with
2529 * node_alloc_noretry == NULL. If this kmalloc fails at boot
2530 * time, we are likely in bigger trouble.
2531 */
2532 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2533 GFP_KERNEL);
2534 } else {
2535 /* allocations done at boot time */
2536 node_alloc_noretry = NULL;
2537 }
2538
2539 /* bit mask controlling how hard we retry per-node allocations */
2540 if (node_alloc_noretry)
2541 nodes_clear(*node_alloc_noretry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542
Andi Kleene5ff2152008-07-23 21:27:42 -07002543 for (i = 0; i < h->max_huge_pages; ++i) {
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07002544 if (hstate_is_gigantic(h)) {
Barry Songdbda8fe2020-07-23 21:15:30 -07002545 if (hugetlb_cma_size) {
Roman Gushchincf11e852020-04-10 14:32:45 -07002546 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
Chen Wandunc9ea7712021-02-24 12:07:58 -08002547 goto free;
Roman Gushchincf11e852020-04-10 14:32:45 -07002548 }
Andi Kleenaa888a72008-07-23 21:27:47 -07002549 if (!alloc_bootmem_huge_page(h))
2550 break;
Michal Hocko0c397da2018-01-31 16:20:56 -08002551 } else if (!alloc_pool_huge_page(h,
Mike Kravetzf60858f2019-09-23 15:37:35 -07002552 &node_states[N_MEMORY],
2553 node_alloc_noretry))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 break;
David Rientjes69ed7792017-07-10 15:48:50 -07002555 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 }
Liam R. Howlettd715cf82017-07-10 15:48:15 -07002557 if (i < h->max_huge_pages) {
2558 char buf[32];
2559
Matthew Wilcoxc6247f72017-07-10 15:48:56 -07002560 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
Liam R. Howlettd715cf82017-07-10 15:48:15 -07002561 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2562 h->max_huge_pages, buf, i);
2563 h->max_huge_pages = i;
2564 }
Chen Wandunc9ea7712021-02-24 12:07:58 -08002565free:
Mike Kravetzf60858f2019-09-23 15:37:35 -07002566 kfree(node_alloc_noretry);
Andi Kleene5ff2152008-07-23 21:27:42 -07002567}
2568
2569static void __init hugetlb_init_hstates(void)
2570{
2571 struct hstate *h;
2572
2573 for_each_hstate(h) {
Naoya Horiguchi641844f2015-06-24 16:56:59 -07002574 if (minimum_order > huge_page_order(h))
2575 minimum_order = huge_page_order(h);
2576
Andi Kleen8faa8b02008-07-23 21:27:48 -07002577 /* oversize hugepages were init'ed in early boot */
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07002578 if (!hstate_is_gigantic(h))
Andi Kleen8faa8b02008-07-23 21:27:48 -07002579 hugetlb_hstate_alloc_pages(h);
Andi Kleene5ff2152008-07-23 21:27:42 -07002580 }
Naoya Horiguchi641844f2015-06-24 16:56:59 -07002581 VM_BUG_ON(minimum_order == UINT_MAX);
Andi Kleene5ff2152008-07-23 21:27:42 -07002582}
2583
2584static void __init report_hugepages(void)
2585{
2586 struct hstate *h;
2587
2588 for_each_hstate(h) {
Andi Kleen4abd32d2008-07-23 21:27:49 -07002589 char buf[32];
Matthew Wilcoxc6247f72017-07-10 15:48:56 -07002590
2591 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
Andrew Mortonffb22af2013-02-22 16:32:08 -08002592 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
Matthew Wilcoxc6247f72017-07-10 15:48:56 -07002593 buf, h->free_huge_pages);
Andi Kleene5ff2152008-07-23 21:27:42 -07002594 }
2595}
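
/*
 * Example boot log line emitted above (values illustrative, taken from
 * a hypothetical x86-64 boot with hugepages=512):
 *   HugeTLB registered 2.00 MiB page size, pre-allocated 512 pages
 */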
2596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597#ifdef CONFIG_HIGHMEM
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002598static void try_to_free_low(struct hstate *h, unsigned long count,
2599 nodemask_t *nodes_allowed)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600{
Christoph Lameter4415cc82006-09-25 23:31:55 -07002601 int i;
2602
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07002603 if (hstate_is_gigantic(h))
Andi Kleenaa888a72008-07-23 21:27:47 -07002604 return;
2605
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002606 for_each_node_mask(i, *nodes_allowed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 struct page *page, *next;
Andi Kleena5516432008-07-23 21:27:41 -07002608 struct list_head *freel = &h->hugepage_freelists[i];
2609 list_for_each_entry_safe(page, next, freel, lru) {
2610 if (count >= h->nr_huge_pages)
Adam Litke6b0c8802007-10-16 01:26:23 -07002611 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 if (PageHighMem(page))
2613 continue;
2614 list_del(&page->lru);
Andi Kleene5ff2152008-07-23 21:27:42 -07002615 update_and_free_page(h, page);
Andi Kleena5516432008-07-23 21:27:41 -07002616 h->free_huge_pages--;
2617 h->free_huge_pages_node[page_to_nid(page)]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 }
2619 }
2620}
2621#else
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002622static inline void try_to_free_low(struct hstate *h, unsigned long count,
2623 nodemask_t *nodes_allowed)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624{
2625}
2626#endif
2627
Wu Fengguang20a03072009-06-16 15:32:22 -07002628/*
2629 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2630 * balanced by operating on them in a round-robin fashion.
2631 * Returns 1 if an adjustment was made.
2632 */
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002633static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2634 int delta)
Wu Fengguang20a03072009-06-16 15:32:22 -07002635{
Joonsoo Kimb2261022013-09-11 14:21:00 -07002636 int nr_nodes, node;
Wu Fengguang20a03072009-06-16 15:32:22 -07002637
2638 VM_BUG_ON(delta != -1 && delta != 1);
Wu Fengguang20a03072009-06-16 15:32:22 -07002639
Joonsoo Kimb2261022013-09-11 14:21:00 -07002640 if (delta < 0) {
2641 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2642 if (h->surplus_huge_pages_node[node])
2643 goto found;
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07002644 }
Joonsoo Kimb2261022013-09-11 14:21:00 -07002645 } else {
2646 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2647 if (h->surplus_huge_pages_node[node] <
2648 h->nr_huge_pages_node[node])
2649 goto found;
Lee Schermerhorne8c5c822009-09-21 17:01:22 -07002650 }
Joonsoo Kimb2261022013-09-11 14:21:00 -07002651 }
2652 return 0;
Wu Fengguang20a03072009-06-16 15:32:22 -07002653
Joonsoo Kimb2261022013-09-11 14:21:00 -07002654found:
2655 h->surplus_huge_pages += delta;
2656 h->surplus_huge_pages_node[node] += delta;
2657 return 1;
Wu Fengguang20a03072009-06-16 15:32:22 -07002658}
2659
Andi Kleena5516432008-07-23 21:27:41 -07002660#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
Mike Kravetzfd875dc2019-05-13 17:19:20 -07002661static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002662 nodemask_t *nodes_allowed)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663{
Adam Litke7893d1d2007-10-16 01:26:18 -07002664 unsigned long min_count, ret;
Mike Kravetzf60858f2019-09-23 15:37:35 -07002665 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2666
2667 /*
2668 * Bit mask controlling how hard we retry per-node allocations.
2669 * If we can not allocate the bit mask, do not attempt to allocate
2670 * the requested huge pages.
2671 */
2672 if (node_alloc_noretry)
2673 nodes_clear(*node_alloc_noretry);
2674 else
2675 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002677 spin_lock(&hugetlb_lock);
2678
2679 /*
Mike Kravetzfd875dc2019-05-13 17:19:20 -07002680 * Check for a node specific request.
2681 * Changing node specific huge page count may require a corresponding
2682 * change to the global count. In any case, the passed node mask
2683 * (nodes_allowed) will restrict alloc/free to the specified node.
2684 */
2685 if (nid != NUMA_NO_NODE) {
2686 unsigned long old_count = count;
2687
2688 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2689 /*
2690 * User may have specified a large count value which caused the
2691 * above calculation to overflow. In this case, they wanted
2692 * to allocate as many huge pages as possible. Set count to
2693 * largest possible value to align with their intention.
2694 */
2695 if (count < old_count)
2696 count = ULONG_MAX;
2697 }
2698
2699 /*
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002700 * Gigantic pages runtime allocation depend on the capability for large
2701 * page range allocation.
2702 * If the system does not provide this feature, return an error when
2703 * the user tries to allocate gigantic pages but let the user free the
2704 * boottime allocated gigantic pages.
2705 */
2706 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2707 if (count > persistent_huge_pages(h)) {
2708 spin_unlock(&hugetlb_lock);
Mike Kravetzf60858f2019-09-23 15:37:35 -07002709 NODEMASK_FREE(node_alloc_noretry);
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002710 return -EINVAL;
2711 }
2712 /* Fall through to decrease pool */
2713 }
Andi Kleenaa888a72008-07-23 21:27:47 -07002714
Adam Litke7893d1d2007-10-16 01:26:18 -07002715 /*
2716 * Increase the pool size
2717 * First take pages out of surplus state. Then make up the
2718 * remaining difference by allocating fresh huge pages.
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08002719 *
Michal Hocko0c397da2018-01-31 16:20:56 -08002720 * We might race with alloc_surplus_huge_page() here and be unable
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08002721 * to convert a surplus huge page to a normal huge page. That is
2722 * not critical, though, it just means the overall size of the
2723 * pool might be one hugepage larger than it needs to be, but
2724 * within all the constraints specified by the sysctls.
Adam Litke7893d1d2007-10-16 01:26:18 -07002725 */
Andi Kleena5516432008-07-23 21:27:41 -07002726 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002727 if (!adjust_pool_surplus(h, nodes_allowed, -1))
Adam Litke7893d1d2007-10-16 01:26:18 -07002728 break;
2729 }
2730
Andi Kleena5516432008-07-23 21:27:41 -07002731 while (count > persistent_huge_pages(h)) {
Adam Litke7893d1d2007-10-16 01:26:18 -07002732 /*
2733 * If this allocation races such that we no longer need the
2734 * page, free_huge_page will handle it by freeing the page
2735 * and reducing the surplus.
2736 */
2737 spin_unlock(&hugetlb_lock);
Jia He649920c2016-08-02 14:02:31 -07002738
2739 /* yield cpu to avoid soft lockup */
2740 cond_resched();
2741
Mike Kravetzf60858f2019-09-23 15:37:35 -07002742 ret = alloc_pool_huge_page(h, nodes_allowed,
2743 node_alloc_noretry);
Adam Litke7893d1d2007-10-16 01:26:18 -07002744 spin_lock(&hugetlb_lock);
2745 if (!ret)
2746 goto out;
2747
Mel Gorman536240f22009-12-14 17:59:56 -08002748 /* Bail for signals. Probably ctrl-c from user */
2749 if (signal_pending(current))
2750 goto out;
Adam Litke7893d1d2007-10-16 01:26:18 -07002751 }
Adam Litke7893d1d2007-10-16 01:26:18 -07002752
2753 /*
2754 * Decrease the pool size
2755 * First return free pages to the buddy allocator (being careful
2756 * to keep enough around to satisfy reservations). Then place
2757 * pages into surplus state as needed so the pool will shrink
2758 * to the desired size as pages become free.
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08002759 *
2760 * By placing pages into the surplus state independent of the
2761 * overcommit value, we are allowing the surplus pool size to
2762 * exceed overcommit. There are few sane options here. Since
Michal Hocko0c397da2018-01-31 16:20:56 -08002763 * alloc_surplus_huge_page() is checking the global counter,
Nishanth Aravamudand1c3fb12007-12-17 16:20:12 -08002764 * though, we'll note that we're not allowed to exceed surplus
2765 * and won't grow the pool anywhere else. Not until one of the
2766 * sysctls are changed, or the surplus pages go out of use.
Adam Litke7893d1d2007-10-16 01:26:18 -07002767 */
Andi Kleena5516432008-07-23 21:27:41 -07002768 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
Adam Litke6b0c8802007-10-16 01:26:23 -07002769 min_count = max(count, min_count);
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002770 try_to_free_low(h, min_count, nodes_allowed);
Andi Kleena5516432008-07-23 21:27:41 -07002771 while (min_count < persistent_huge_pages(h)) {
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002772 if (!free_pool_huge_page(h, nodes_allowed, 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 break;
Mizuma, Masayoshi55f67142014-04-07 15:37:54 -07002774 cond_resched_lock(&hugetlb_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 }
Andi Kleena5516432008-07-23 21:27:41 -07002776 while (count < persistent_huge_pages(h)) {
Lee Schermerhorn6ae11b22009-12-14 17:58:16 -08002777 if (!adjust_pool_surplus(h, nodes_allowed, 1))
Adam Litke7893d1d2007-10-16 01:26:18 -07002778 break;
2779 }
2780out:
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002781 h->max_huge_pages = persistent_huge_pages(h);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 spin_unlock(&hugetlb_lock);
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002783
Mike Kravetzf60858f2019-09-23 15:37:35 -07002784 NODEMASK_FREE(node_alloc_noretry);
2785
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002786 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787}
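
/*
 * Resize algorithm recap (descriptive only): growing the pool first
 * converts surplus pages back into persistent ones, then allocates
 * fresh huge pages; shrinking first frees pages back to the buddy
 * allocator down to min_count (which protects reserved pages), then
 * marks any remaining excess as surplus so it drains away as those
 * pages are freed.
 */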
2788
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002789#define HSTATE_ATTR_RO(_name) \
2790 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2791
2792#define HSTATE_ATTR(_name) \
2793 static struct kobj_attribute _name##_attr = \
2794 __ATTR(_name, 0644, _name##_show, _name##_store)
2795
2796static struct kobject *hugepages_kobj;
2797static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2798
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002799static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2800
2801static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002802{
2803 int i;
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002804
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002805 for (i = 0; i < HUGE_MAX_HSTATE; i++)
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002806 if (hstate_kobjs[i] == kobj) {
2807 if (nidp)
2808 *nidp = NUMA_NO_NODE;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002809 return &hstates[i];
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002810 }
2811
2812 return kobj_to_node_hstate(kobj, nidp);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002813}
2814
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002815static ssize_t nr_hugepages_show_common(struct kobject *kobj,
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002816 struct kobj_attribute *attr, char *buf)
2817{
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002818 struct hstate *h;
2819 unsigned long nr_huge_pages;
2820 int nid;
2821
2822 h = kobj_to_hstate(kobj, &nid);
2823 if (nid == NUMA_NO_NODE)
2824 nr_huge_pages = h->nr_huge_pages;
2825 else
2826 nr_huge_pages = h->nr_huge_pages_node[nid];
2827
2828 return sprintf(buf, "%lu\n", nr_huge_pages);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002829}
Eric B Munsonadbe8722011-01-13 15:47:27 -08002830
David Rientjes238d3c12014-08-06 16:06:51 -07002831static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2832 struct hstate *h, int nid,
2833 unsigned long count, size_t len)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002834{
2835 int err;
Oscar Salvador2d0adf72019-05-13 17:19:23 -07002836 nodemask_t nodes_allowed, *n_mask;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002837
Oscar Salvador2d0adf72019-05-13 17:19:23 -07002838 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2839 return -EINVAL;
Eric B Munsonadbe8722011-01-13 15:47:27 -08002840
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002841 if (nid == NUMA_NO_NODE) {
2842 /*
2843 * global hstate attribute
2844 */
2845 if (!(obey_mempolicy &&
Oscar Salvador2d0adf72019-05-13 17:19:23 -07002846 init_nodemask_of_mempolicy(&nodes_allowed)))
2847 n_mask = &node_states[N_MEMORY];
2848 else
2849 n_mask = &nodes_allowed;
2850 } else {
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002851 /*
Mike Kravetzfd875dc2019-05-13 17:19:20 -07002852 * Node specific request. count adjustment happens in
2853 * set_max_huge_pages() after acquiring hugetlb_lock.
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002854 */
Oscar Salvador2d0adf72019-05-13 17:19:23 -07002855 init_nodemask_of_node(&nodes_allowed, nid);
2856 n_mask = &nodes_allowed;
Mike Kravetzfd875dc2019-05-13 17:19:20 -07002857 }
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002858
Oscar Salvador2d0adf72019-05-13 17:19:23 -07002859 err = set_max_huge_pages(h, count, nid, n_mask);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002860
Alexandre Ghiti4eb07162019-05-13 17:19:04 -07002861 return err ? err : len;
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002862}
2863
David Rientjes238d3c12014-08-06 16:06:51 -07002864static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2865 struct kobject *kobj, const char *buf,
2866 size_t len)
2867{
2868 struct hstate *h;
2869 unsigned long count;
2870 int nid;
2871 int err;
2872
2873 err = kstrtoul(buf, 10, &count);
2874 if (err)
2875 return err;
2876
2877 h = kobj_to_hstate(kobj, &nid);
2878 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2879}
2880
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002881static ssize_t nr_hugepages_show(struct kobject *kobj,
2882 struct kobj_attribute *attr, char *buf)
2883{
2884 return nr_hugepages_show_common(kobj, attr, buf);
2885}
2886
2887static ssize_t nr_hugepages_store(struct kobject *kobj,
2888 struct kobj_attribute *attr, const char *buf, size_t len)
2889{
David Rientjes238d3c12014-08-06 16:06:51 -07002890 return nr_hugepages_store_common(false, kobj, buf, len);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002891}
2892HSTATE_ATTR(nr_hugepages);
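
/*
 * Example (illustrative): on a typical x86-64 build the attribute above
 * is visible as
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * and a write such as "echo 512 > .../nr_hugepages" ends up in
 * __nr_hugepages_store_common() with nid == NUMA_NO_NODE.
 */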
2893
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002894#ifdef CONFIG_NUMA
2895
2896/*
2897 * hstate attribute for optionally mempolicy-based constraint on persistent
2898 * huge page alloc/free.
2899 */
2900static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2901 struct kobj_attribute *attr, char *buf)
2902{
2903 return nr_hugepages_show_common(kobj, attr, buf);
2904}
2905
2906static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2907 struct kobj_attribute *attr, const char *buf, size_t len)
2908{
David Rientjes238d3c12014-08-06 16:06:51 -07002909 return nr_hugepages_store_common(true, kobj, buf, len);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002910}
2911HSTATE_ATTR(nr_hugepages_mempolicy);
2912#endif
2913
2914
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002915static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2916 struct kobj_attribute *attr, char *buf)
2917{
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002918 struct hstate *h = kobj_to_hstate(kobj, NULL);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002919 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2920}
Eric B Munsonadbe8722011-01-13 15:47:27 -08002921
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002922static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2923 struct kobj_attribute *attr, const char *buf, size_t count)
2924{
2925 int err;
2926 unsigned long input;
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002927 struct hstate *h = kobj_to_hstate(kobj, NULL);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002928
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07002929 if (hstate_is_gigantic(h))
Eric B Munsonadbe8722011-01-13 15:47:27 -08002930 return -EINVAL;
2931
Jingoo Han3dbb95f2013-09-11 14:20:25 -07002932 err = kstrtoul(buf, 10, &input);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002933 if (err)
Eric B Munson73ae31e2011-01-13 15:47:28 -08002934 return err;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002935
2936 spin_lock(&hugetlb_lock);
2937 h->nr_overcommit_huge_pages = input;
2938 spin_unlock(&hugetlb_lock);
2939
2940 return count;
2941}
2942HSTATE_ATTR(nr_overcommit_hugepages);
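
/*
 * Note (descriptive): nr_overcommit_hugepages caps how many surplus
 * huge pages may be allocated on demand over and above nr_hugepages;
 * the store handler above rejects it for gigantic hstates, which
 * cannot be overcommitted at runtime.
 */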
2943
2944static ssize_t free_hugepages_show(struct kobject *kobj,
2945 struct kobj_attribute *attr, char *buf)
2946{
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002947 struct hstate *h;
2948 unsigned long free_huge_pages;
2949 int nid;
2950
2951 h = kobj_to_hstate(kobj, &nid);
2952 if (nid == NUMA_NO_NODE)
2953 free_huge_pages = h->free_huge_pages;
2954 else
2955 free_huge_pages = h->free_huge_pages_node[nid];
2956
2957 return sprintf(buf, "%lu\n", free_huge_pages);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002958}
2959HSTATE_ATTR_RO(free_hugepages);
2960
2961static ssize_t resv_hugepages_show(struct kobject *kobj,
2962 struct kobj_attribute *attr, char *buf)
2963{
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002964 struct hstate *h = kobj_to_hstate(kobj, NULL);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002965 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2966}
2967HSTATE_ATTR_RO(resv_hugepages);
2968
2969static ssize_t surplus_hugepages_show(struct kobject *kobj,
2970 struct kobj_attribute *attr, char *buf)
2971{
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08002972 struct hstate *h;
2973 unsigned long surplus_huge_pages;
2974 int nid;
2975
2976 h = kobj_to_hstate(kobj, &nid);
2977 if (nid == NUMA_NO_NODE)
2978 surplus_huge_pages = h->surplus_huge_pages;
2979 else
2980 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2981
2982 return sprintf(buf, "%lu\n", surplus_huge_pages);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002983}
2984HSTATE_ATTR_RO(surplus_hugepages);
2985
2986static struct attribute *hstate_attrs[] = {
2987 &nr_hugepages_attr.attr,
2988 &nr_overcommit_hugepages_attr.attr,
2989 &free_hugepages_attr.attr,
2990 &resv_hugepages_attr.attr,
2991 &surplus_hugepages_attr.attr,
Lee Schermerhorn06808b02009-12-14 17:58:21 -08002992#ifdef CONFIG_NUMA
2993 &nr_hugepages_mempolicy_attr.attr,
2994#endif
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002995 NULL,
2996};
2997
Arvind Yadav67e5ed92017-09-06 16:22:06 -07002998static const struct attribute_group hstate_attr_group = {
Nishanth Aravamudana3437872008-07-23 21:27:44 -07002999 .attrs = hstate_attrs,
3000};
3001
Jeff Mahoney094e9532010-02-02 13:44:14 -08003002static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3003 struct kobject **hstate_kobjs,
Arvind Yadav67e5ed92017-09-06 16:22:06 -07003004 const struct attribute_group *hstate_attr_group)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003005{
3006 int retval;
Aneesh Kumar K.V972dc4d2012-07-31 16:42:00 -07003007 int hi = hstate_index(h);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003008
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003009 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3010 if (!hstate_kobjs[hi])
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003011 return -ENOMEM;
3012
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003013 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
Miaohe Lin89b2dbd2021-02-24 12:06:50 -08003014 if (retval) {
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003015 kobject_put(hstate_kobjs[hi]);
Miaohe Lin89b2dbd2021-02-24 12:06:50 -08003016 hstate_kobjs[hi] = NULL;
3017 }
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003018
3019 return retval;
3020}
3021
3022static void __init hugetlb_sysfs_init(void)
3023{
3024 struct hstate *h;
3025 int err;
3026
3027 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3028 if (!hugepages_kobj)
3029 return;
3030
3031 for_each_hstate(h) {
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003032 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3033 hstate_kobjs, &hstate_attr_group);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003034 if (err)
Mike Kravetz282f4212020-06-03 16:00:46 -07003035 pr_err("HugeTLB: Unable to add hstate %s", h->name);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003036 }
3037}
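
/*
 * Resulting sysfs layout (illustrative), one directory per hstate:
 *   /sys/kernel/mm/hugepages/hugepages-<size>kB/
 *       nr_hugepages  nr_overcommit_hugepages  free_hugepages
 *       resv_hugepages  surplus_hugepages
 *       (plus nr_hugepages_mempolicy on CONFIG_NUMA builds)
 */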
3038
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003039#ifdef CONFIG_NUMA
3040
3041/*
3042 * node_hstate/s - associate per node hstate attributes, via their kobjects,
Kay Sievers10fbcf42011-12-21 14:48:43 -08003043 * with node devices in node_devices[] using a parallel array. The array
3044 * index of a node device or hstate equals the node id.
3045 * This is here to avoid any static dependency of the node device driver, in
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003046 * the base kernel, on the hugetlb module.
3047 */
3048struct node_hstate {
3049 struct kobject *hugepages_kobj;
3050 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3051};
Alexander Kuleshovb4e289a2015-11-05 18:50:14 -08003052static struct node_hstate node_hstates[MAX_NUMNODES];
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003053
3054/*
Kay Sievers10fbcf42011-12-21 14:48:43 -08003055 * A subset of global hstate attributes for node devices
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003056 */
3057static struct attribute *per_node_hstate_attrs[] = {
3058 &nr_hugepages_attr.attr,
3059 &free_hugepages_attr.attr,
3060 &surplus_hugepages_attr.attr,
3061 NULL,
3062};
3063
Arvind Yadav67e5ed92017-09-06 16:22:06 -07003064static const struct attribute_group per_node_hstate_attr_group = {
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003065 .attrs = per_node_hstate_attrs,
3066};
3067
3068/*
Kay Sievers10fbcf42011-12-21 14:48:43 -08003069 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003070 * Returns node id via non-NULL nidp.
3071 */
3072static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3073{
3074 int nid;
3075
3076 for (nid = 0; nid < nr_node_ids; nid++) {
3077 struct node_hstate *nhs = &node_hstates[nid];
3078 int i;
3079 for (i = 0; i < HUGE_MAX_HSTATE; i++)
3080 if (nhs->hstate_kobjs[i] == kobj) {
3081 if (nidp)
3082 *nidp = nid;
3083 return &hstates[i];
3084 }
3085 }
3086
3087 BUG();
3088 return NULL;
3089}
3090
3091/*
Kay Sievers10fbcf42011-12-21 14:48:43 -08003092 * Unregister hstate attributes from a single node device.
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003093 * No-op if no hstate attributes attached.
3094 */
Claudiu Ghioc3cd8b442013-03-04 12:46:15 +02003095static void hugetlb_unregister_node(struct node *node)
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003096{
3097 struct hstate *h;
Kay Sievers10fbcf42011-12-21 14:48:43 -08003098 struct node_hstate *nhs = &node_hstates[node->dev.id];
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003099
3100 if (!nhs->hugepages_kobj)
Lee Schermerhorn9b5e5d02009-12-14 17:58:32 -08003101 return; /* no hstate attributes */
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003102
Aneesh Kumar K.V972dc4d2012-07-31 16:42:00 -07003103 for_each_hstate(h) {
3104 int idx = hstate_index(h);
3105 if (nhs->hstate_kobjs[idx]) {
3106 kobject_put(nhs->hstate_kobjs[idx]);
3107 nhs->hstate_kobjs[idx] = NULL;
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003108 }
Aneesh Kumar K.V972dc4d2012-07-31 16:42:00 -07003109 }
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003110
3111 kobject_put(nhs->hugepages_kobj);
3112 nhs->hugepages_kobj = NULL;
3113}
3114
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003115
3116/*
Kay Sievers10fbcf42011-12-21 14:48:43 -08003117 * Register hstate attributes for a single node device.
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003118 * No-op if attributes already registered.
3119 */
Claudiu Ghioc3cd8b442013-03-04 12:46:15 +02003120static void hugetlb_register_node(struct node *node)
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003121{
3122 struct hstate *h;
Kay Sievers10fbcf42011-12-21 14:48:43 -08003123 struct node_hstate *nhs = &node_hstates[node->dev.id];
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003124 int err;
3125
3126 if (nhs->hugepages_kobj)
3127 return; /* already allocated */
3128
3129 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
Kay Sievers10fbcf42011-12-21 14:48:43 -08003130 &node->dev.kobj);
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003131 if (!nhs->hugepages_kobj)
3132 return;
3133
3134 for_each_hstate(h) {
3135 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3136 nhs->hstate_kobjs,
3137 &per_node_hstate_attr_group);
3138 if (err) {
Mike Kravetz282f4212020-06-03 16:00:46 -07003139 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
Andrew Mortonffb22af2013-02-22 16:32:08 -08003140 h->name, node->dev.id);
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003141 hugetlb_unregister_node(node);
3142 break;
3143 }
3144 }
3145}
3146
3147/*
Lee Schermerhorn9b5e5d02009-12-14 17:58:32 -08003148 * hugetlb init time: register hstate attributes for all registered node
Kay Sievers10fbcf42011-12-21 14:48:43 -08003149 * devices of nodes that have memory. All on-line nodes should have
3150 * registered their associated device by this time.
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003151 */
Luiz Capitulino7d9ca002014-12-12 16:55:24 -08003152static void __init hugetlb_register_all_nodes(void)
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003153{
3154 int nid;
3155
Lai Jiangshan8cebfcd2012-12-12 13:51:36 -08003156 for_each_node_state(nid, N_MEMORY) {
Wen Congyang87327942012-12-11 16:00:56 -08003157 struct node *node = node_devices[nid];
Kay Sievers10fbcf42011-12-21 14:48:43 -08003158 if (node->dev.id == nid)
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003159 hugetlb_register_node(node);
3160 }
3161
3162 /*
Kay Sievers10fbcf42011-12-21 14:48:43 -08003163 * Let the node device driver know we're here so it can
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003164 * [un]register hstate attributes on node hotplug.
3165 */
3166 register_hugetlbfs_with_node(hugetlb_register_node,
3167 hugetlb_unregister_node);
3168}
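
/*
 * Example (illustrative): the per-node subset registered above appears
 * under the node devices as
 *   /sys/devices/system/node/node<N>/hugepages/hugepages-<size>kB/
 * carrying only nr_hugepages, free_hugepages and surplus_hugepages
 * (see per_node_hstate_attrs).
 */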
3169#else /* !CONFIG_NUMA */
3170
3171static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3172{
3173 BUG();
3174 if (nidp)
3175 *nidp = -1;
3176 return NULL;
3177}
3178
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003179static void hugetlb_register_all_nodes(void) { }
3180
3181#endif
3182
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003183static int __init hugetlb_init(void)
3184{
Davidlohr Bueso8382d912014-04-03 14:47:31 -07003185 int i;
3186
Mike Kravetzc2833a52020-06-03 16:00:50 -07003187 if (!hugepages_supported()) {
3188 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3189 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
Benjamin Herrenschmidt0ef89d22008-07-31 00:07:30 -07003190 return 0;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003191 }
Vaishali Thakkarf8b74812016-02-17 13:11:26 -08003192
Mike Kravetz282f4212020-06-03 16:00:46 -07003193 /*
3194 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
3195 * architectures depend on setup being done here.
3196 */
3197 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3198 if (!parsed_default_hugepagesz) {
3199 /*
3200 * If we did not parse a default huge page size, set
3201 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
3202 * number of huge pages for this default size was implicitly
3203 * specified, set that here as well.
3204 * Note that the implicit setting will overwrite an explicit
3205 * setting. A warning will be printed in this case.
3206 */
3207 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3208 if (default_hstate_max_huge_pages) {
3209 if (default_hstate.max_huge_pages) {
3210 char buf[32];
Andi Kleenaa888a72008-07-23 21:27:47 -07003211
Mike Kravetz282f4212020-06-03 16:00:46 -07003212 string_get_size(huge_page_size(&default_hstate),
3213 1, STRING_UNITS_2, buf, 32);
3214 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3215 default_hstate.max_huge_pages, buf);
3216 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3217 default_hstate_max_huge_pages);
3218 }
3219 default_hstate.max_huge_pages =
3220 default_hstate_max_huge_pages;
3221 }
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003222 }
Andi Kleenaa888a72008-07-23 21:27:47 -07003223
Roman Gushchincf11e852020-04-10 14:32:45 -07003224 hugetlb_cma_check();
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003225 hugetlb_init_hstates();
Andi Kleenaa888a72008-07-23 21:27:47 -07003226 gather_bootmem_prealloc();
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003227 report_hugepages();
3228
3229 hugetlb_sysfs_init();
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003230 hugetlb_register_all_nodes();
Jianguo Wu7179e7b2012-12-18 14:23:19 -08003231 hugetlb_cgroup_file_init();
Lee Schermerhorn9a3052302009-12-14 17:58:25 -08003232
Davidlohr Bueso8382d912014-04-03 14:47:31 -07003233#ifdef CONFIG_SMP
3234 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3235#else
3236 num_fault_mutexes = 1;
3237#endif
Mike Kravetzc672c7f2015-09-08 15:01:35 -07003238 hugetlb_fault_mutex_table =
Kees Cook6da2ec52018-06-12 13:55:00 -07003239 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3240 GFP_KERNEL);
Mike Kravetzc672c7f2015-09-08 15:01:35 -07003241 BUG_ON(!hugetlb_fault_mutex_table);
Davidlohr Bueso8382d912014-04-03 14:47:31 -07003242
3243 for (i = 0; i < num_fault_mutexes; i++)
Mike Kravetzc672c7f2015-09-08 15:01:35 -07003244 mutex_init(&hugetlb_fault_mutex_table[i]);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003245 return 0;
3246}
Paul Gortmaker3e89e1c2016-01-14 15:21:52 -08003247subsys_initcall(hugetlb_init);
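
/*
 * Init order recap (descriptive only): hugetlb_init() runs as a
 * subsys_initcall, after the __setup hooks below have parsed any
 * hugepagesz=/hugepages=/default_hugepagesz= options and after
 * boot-time gigantic pages were taken from memblock, so all it has to
 * do is finish the hstates, register the sysfs and cgroup interfaces
 * and set up the fault mutex table.
 */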
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003248
Mike Kravetzae94da82020-06-03 16:00:34 -07003249/* Overwritten by architectures with more huge page sizes */
3250bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
Vaishali Thakkar9fee0212016-05-19 17:11:04 -07003251{
Mike Kravetzae94da82020-06-03 16:00:34 -07003252 return size == HPAGE_SIZE;
Vaishali Thakkar9fee0212016-05-19 17:11:04 -07003253}
3254
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08003255void __init hugetlb_add_hstate(unsigned int order)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003256{
3257 struct hstate *h;
Andi Kleen8faa8b02008-07-23 21:27:48 -07003258 unsigned long i;
3259
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003260 if (size_to_hstate(PAGE_SIZE << order)) {
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003261 return;
3262 }
Aneesh Kumar K.V47d38342012-07-31 16:41:54 -07003263 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003264 BUG_ON(order == 0);
Aneesh Kumar K.V47d38342012-07-31 16:41:54 -07003265 h = &hstates[hugetlb_max_hstate++];
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003266 h->order = order;
3267 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
Andi Kleen8faa8b02008-07-23 21:27:48 -07003268 h->nr_huge_pages = 0;
3269 h->free_huge_pages = 0;
3270 for (i = 0; i < MAX_NUMNODES; ++i)
3271 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
Aneesh Kumar K.V0edaecf2012-07-31 16:42:07 -07003272 INIT_LIST_HEAD(&h->hugepage_activelist);
Andrew Morton54f18d32016-05-19 17:11:40 -07003273 h->next_nid_to_alloc = first_memory_node;
3274 h->next_nid_to_free = first_memory_node;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003275 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3276 huge_page_size(h)/1024);
Andi Kleen8faa8b02008-07-23 21:27:48 -07003277
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003278 parsed_hstate = h;
3279}
3280
Mike Kravetz282f4212020-06-03 16:00:46 -07003281/*
3282 * hugepages command line processing
3283 * hugepages normally follows a valid hugepagesz or default_hugepagesz
3284 * specification. If not, ignore the hugepages value. hugepages can also
3285 * be the first huge page command line option in which case it implicitly
3286 * specifies the number of huge pages for the default size.
3287 */
3288static int __init hugepages_setup(char *s)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003289{
3290 unsigned long *mhp;
Andi Kleen8faa8b02008-07-23 21:27:48 -07003291 static unsigned long *last_mhp;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003292
Vaishali Thakkar9fee0212016-05-19 17:11:04 -07003293 if (!parsed_valid_hugepagesz) {
Mike Kravetz282f4212020-06-03 16:00:46 -07003294 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
Vaishali Thakkar9fee0212016-05-19 17:11:04 -07003295 parsed_valid_hugepagesz = true;
Mike Kravetz282f4212020-06-03 16:00:46 -07003296 return 0;
Vaishali Thakkar9fee0212016-05-19 17:11:04 -07003297 }
Mike Kravetz282f4212020-06-03 16:00:46 -07003298
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003299 /*
Mike Kravetz282f4212020-06-03 16:00:46 -07003300 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
3301 * yet, so this hugepages= parameter goes to the "default hstate".
3302 * Otherwise, it goes with the previously parsed hugepagesz or
3303 * default_hugepagesz.
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003304 */
Vaishali Thakkar9fee0212016-05-19 17:11:04 -07003305 else if (!hugetlb_max_hstate)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003306 mhp = &default_hstate_max_huge_pages;
3307 else
3308 mhp = &parsed_hstate->max_huge_pages;
3309
Andi Kleen8faa8b02008-07-23 21:27:48 -07003310 if (mhp == last_mhp) {
Mike Kravetz282f4212020-06-03 16:00:46 -07003311 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3312 return 0;
Andi Kleen8faa8b02008-07-23 21:27:48 -07003313 }
3314
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003315 if (sscanf(s, "%lu", mhp) <= 0)
3316 *mhp = 0;
3317
Andi Kleen8faa8b02008-07-23 21:27:48 -07003318 /*
3319 * Global state is always initialized later in hugetlb_init.
3320 * But we need to allocate >= MAX_ORDER hstates here early to still
3321 * use the bootmem allocator.
3322 */
Aneesh Kumar K.V47d38342012-07-31 16:41:54 -07003323 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
Andi Kleen8faa8b02008-07-23 21:27:48 -07003324 hugetlb_hstate_alloc_pages(parsed_hstate);
3325
3326 last_mhp = mhp;
3327
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003328 return 1;
3329}
Mike Kravetz282f4212020-06-03 16:00:46 -07003330__setup("hugepages=", hugepages_setup);
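
/*
 * Example command lines (illustrative; the exact valid sizes depend on
 * what arch_hugetlb_valid_size() accepts on the running architecture):
 *   hugepages=512                        512 pages of the default size
 *   hugepagesz=2M hugepages=512          512 pages of 2 MB
 *   default_hugepagesz=1G hugepages=16   16 pages of 1 GB, 1 GB default
 */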
Nick Piggine11bfbf2008-07-23 21:27:52 -07003331
Mike Kravetz282f4212020-06-03 16:00:46 -07003332/*
3333 * hugepagesz command line processing
3334 * A specific huge page size can only be specified once with hugepagesz.
3335 * hugepagesz is followed by hugepages on the command line. The global
3336 * variable 'parsed_valid_hugepagesz' is used to determine if prior
3337 * hugepagesz argument was valid.
3338 */
Mike Kravetz359f2542020-06-03 16:00:38 -07003339static int __init hugepagesz_setup(char *s)
Nick Piggine11bfbf2008-07-23 21:27:52 -07003340{
Mike Kravetz359f2542020-06-03 16:00:38 -07003341 unsigned long size;
Mike Kravetz282f4212020-06-03 16:00:46 -07003342 struct hstate *h;
3343
3344 parsed_valid_hugepagesz = false;
Mike Kravetz359f2542020-06-03 16:00:38 -07003345 size = (unsigned long)memparse(s, NULL);
3346
3347 if (!arch_hugetlb_valid_size(size)) {
Mike Kravetz282f4212020-06-03 16:00:46 -07003348 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
Mike Kravetz359f2542020-06-03 16:00:38 -07003349 return 0;
3350 }
3351
Mike Kravetz282f4212020-06-03 16:00:46 -07003352 h = size_to_hstate(size);
3353 if (h) {
3354 /*
3355 * hstate for this size already exists. This is normally
3356 * an error, but is allowed if the existing hstate is the
3357 * default hstate. More specifically, it is only allowed if
3358 * the number of huge pages for the default hstate was not
3359 * previously specified.
3360 */
3361 if (!parsed_default_hugepagesz || h != &default_hstate ||
3362 default_hstate.max_huge_pages) {
3363 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3364 return 0;
3365 }
3366
3367 /*
3368 * No need to call hugetlb_add_hstate() as hstate already
3369 * exists. But, do set parsed_hstate so that a following
3370 * hugepages= parameter will be applied to this hstate.
3371 */
3372 parsed_hstate = h;
3373 parsed_valid_hugepagesz = true;
3374 return 1;
Mike Kravetz38237832020-06-03 16:00:42 -07003375 }
3376
Mike Kravetz359f2542020-06-03 16:00:38 -07003377 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
Mike Kravetz282f4212020-06-03 16:00:46 -07003378 parsed_valid_hugepagesz = true;
Nick Piggine11bfbf2008-07-23 21:27:52 -07003379 return 1;
3380}
Mike Kravetz359f2542020-06-03 16:00:38 -07003381__setup("hugepagesz=", hugepagesz_setup);
3382
Mike Kravetz282f4212020-06-03 16:00:46 -07003383/*
3384 * default_hugepagesz command line input
3385 * Only one instance of default_hugepagesz allowed on command line.
3386 */
Mike Kravetzae94da82020-06-03 16:00:34 -07003387static int __init default_hugepagesz_setup(char *s)
Nick Piggine11bfbf2008-07-23 21:27:52 -07003388{
Mike Kravetzae94da82020-06-03 16:00:34 -07003389 unsigned long size;
3390
Mike Kravetz282f4212020-06-03 16:00:46 -07003391 parsed_valid_hugepagesz = false;
Mike Kravetz282f4212020-06-03 16:00:46 -07003392 if (parsed_default_hugepagesz) {
3393 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3394 return 0;
3395 }
3396
3397 size = (unsigned long)memparse(s, NULL);
3398
3399 if (!arch_hugetlb_valid_size(size)) {
3400 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3401 return 0;
3402 }
3403
3404 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3405 parsed_valid_hugepagesz = true;
3406 parsed_default_hugepagesz = true;
3407 default_hstate_idx = hstate_index(size_to_hstate(size));
3408
3409 /*
3410 * The number of default huge pages (for this size) could have been
3411 * specified as the first hugetlb parameter: hugepages=X. If so,
3412 * then default_hstate_max_huge_pages is set. If the default huge
3413 * page size is gigantic (>= MAX_ORDER), then the pages must be
3414 * allocated here from bootmem allocator.
3415 */
3416 if (default_hstate_max_huge_pages) {
3417 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3418 if (hstate_is_gigantic(&default_hstate))
3419 hugetlb_hstate_alloc_pages(&default_hstate);
3420 default_hstate_max_huge_pages = 0;
3421 }
3422
Nick Piggine11bfbf2008-07-23 21:27:52 -07003423 return 1;
3424}
Mike Kravetzae94da82020-06-03 16:00:34 -07003425__setup("default_hugepagesz=", default_hugepagesz_setup);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07003426
Muchun Song8ca39e62020-08-11 18:30:32 -07003427static unsigned int allowed_mems_nr(struct hstate *h)
Nishanth Aravamudan8a213462008-07-25 19:44:37 -07003428{
3429 int node;
3430 unsigned int nr = 0;
Muchun Song8ca39e62020-08-11 18:30:32 -07003431 nodemask_t *mpol_allowed;
3432 unsigned int *array = h->free_huge_pages_node;
3433 gfp_t gfp_mask = htlb_alloc_mask(h);
Nishanth Aravamudan8a213462008-07-25 19:44:37 -07003434
Muchun Song8ca39e62020-08-11 18:30:32 -07003435 mpol_allowed = policy_nodemask_current(gfp_mask);
3436
3437 for_each_node_mask(node, cpuset_current_mems_allowed) {
3438 if (!mpol_allowed ||
3439 (mpol_allowed && node_isset(node, *mpol_allowed)))
3440 nr += array[node];
3441 }
Nishanth Aravamudan8a213462008-07-25 19:44:37 -07003442
3443 return nr;
3444}
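
/*
 * Note (descriptive): allowed_mems_nr() counts free huge pages only on
 * nodes permitted by both the current cpuset and, if set, the task's
 * memory policy; hugetlb_acct_memory() below uses it as a best-effort
 * availability check when taking new reservations.
 */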
3445
3446#ifdef CONFIG_SYSCTL
Muchun Song17743792020-09-04 16:36:13 -07003447static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3448 void *buffer, size_t *length,
3449 loff_t *ppos, unsigned long *out)
3450{
3451 struct ctl_table dup_table;
3452
3453 /*
3454 * In order to avoid races with __do_proc_doulongvec_minmax(), we
3455	 * duplicate @table and alter the copy instead of the original.
3456 */
3457 dup_table = *table;
3458 dup_table.data = out;
3459
3460 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3461}
3462
Lee Schermerhorn06808b02009-12-14 17:58:21 -08003463static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3464 struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003465 void *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466{
Andi Kleene5ff2152008-07-23 21:27:42 -07003467 struct hstate *h = &default_hstate;
David Rientjes238d3c12014-08-06 16:06:51 -07003468 unsigned long tmp = h->max_huge_pages;
Michal Hocko08d4a242011-01-13 15:47:26 -08003469 int ret;
Andi Kleene5ff2152008-07-23 21:27:42 -07003470
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003471 if (!hugepages_supported())
Jan Stancek86613622016-03-09 14:08:35 -08003472 return -EOPNOTSUPP;
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003473
Muchun Song17743792020-09-04 16:36:13 -07003474 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3475 &tmp);
Michal Hocko08d4a242011-01-13 15:47:26 -08003476 if (ret)
3477 goto out;
Andi Kleene5ff2152008-07-23 21:27:42 -07003478
David Rientjes238d3c12014-08-06 16:06:51 -07003479 if (write)
3480 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3481 NUMA_NO_NODE, tmp, *length);
Michal Hocko08d4a242011-01-13 15:47:26 -08003482out:
3483 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484}
Mel Gorman396faf02007-07-17 04:03:13 -07003485
Lee Schermerhorn06808b02009-12-14 17:58:21 -08003486int hugetlb_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003487 void *buffer, size_t *length, loff_t *ppos)
Lee Schermerhorn06808b02009-12-14 17:58:21 -08003488{
3489
3490 return hugetlb_sysctl_handler_common(false, table, write,
3491 buffer, length, ppos);
3492}
3493
3494#ifdef CONFIG_NUMA
3495int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003496 void *buffer, size_t *length, loff_t *ppos)
Lee Schermerhorn06808b02009-12-14 17:58:21 -08003497{
3498 return hugetlb_sysctl_handler_common(true, table, write,
3499 buffer, length, ppos);
3500}
3501#endif /* CONFIG_NUMA */
3502
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08003503int hugetlb_overcommit_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003504 void *buffer, size_t *length, loff_t *ppos)
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08003505{
Andi Kleena5516432008-07-23 21:27:41 -07003506 struct hstate *h = &default_hstate;
Andi Kleene5ff2152008-07-23 21:27:42 -07003507 unsigned long tmp;
Michal Hocko08d4a242011-01-13 15:47:26 -08003508 int ret;
Andi Kleene5ff2152008-07-23 21:27:42 -07003509
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003510 if (!hugepages_supported())
Jan Stancek86613622016-03-09 14:08:35 -08003511 return -EOPNOTSUPP;
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003512
Petr Holasekc033a932011-03-22 16:33:05 -07003513 tmp = h->nr_overcommit_huge_pages;
Andi Kleene5ff2152008-07-23 21:27:42 -07003514
Luiz Capitulinobae7f4a2014-06-04 16:07:08 -07003515 if (write && hstate_is_gigantic(h))
Eric B Munsonadbe8722011-01-13 15:47:27 -08003516 return -EINVAL;
3517
Muchun Song17743792020-09-04 16:36:13 -07003518 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3519 &tmp);
Michal Hocko08d4a242011-01-13 15:47:26 -08003520 if (ret)
3521 goto out;
Andi Kleene5ff2152008-07-23 21:27:42 -07003522
3523 if (write) {
3524 spin_lock(&hugetlb_lock);
3525 h->nr_overcommit_huge_pages = tmp;
3526 spin_unlock(&hugetlb_lock);
3527 }
Michal Hocko08d4a242011-01-13 15:47:26 -08003528out:
3529 return ret;
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08003530}
3531
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532#endif /* CONFIG_SYSCTL */
3533
Alexey Dobriyane1759c22008-10-15 23:50:22 +04003534void hugetlb_report_meminfo(struct seq_file *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535{
Roman Gushchinfcb2b0c2018-01-31 16:16:22 -08003536 struct hstate *h;
3537 unsigned long total = 0;
3538
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003539 if (!hugepages_supported())
3540 return;
Roman Gushchinfcb2b0c2018-01-31 16:16:22 -08003541
3542 for_each_hstate(h) {
3543 unsigned long count = h->nr_huge_pages;
3544
3545 total += (PAGE_SIZE << huge_page_order(h)) * count;
3546
3547 if (h == &default_hstate)
3548 seq_printf(m,
3549 "HugePages_Total: %5lu\n"
3550 "HugePages_Free: %5lu\n"
3551 "HugePages_Rsvd: %5lu\n"
3552 "HugePages_Surp: %5lu\n"
3553 "Hugepagesize: %8lu kB\n",
3554 count,
3555 h->free_huge_pages,
3556 h->resv_huge_pages,
3557 h->surplus_huge_pages,
3558 (PAGE_SIZE << huge_page_order(h)) / 1024);
3559 }
3560
3561 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562}
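
/*
 * Example /proc/meminfo excerpt produced above (values illustrative):
 *   HugePages_Total:     512
 *   HugePages_Free:      512
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:         1048576 kB
 */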
3563
Joe Perches79815932020-09-16 13:40:43 -07003564int hugetlb_report_node_meminfo(char *buf, int len, int nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565{
Andi Kleena5516432008-07-23 21:27:41 -07003566 struct hstate *h = &default_hstate;
Joe Perches79815932020-09-16 13:40:43 -07003567
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003568 if (!hugepages_supported())
3569 return 0;
Joe Perches79815932020-09-16 13:40:43 -07003570
3571 return sysfs_emit_at(buf, len,
3572 "Node %d HugePages_Total: %5u\n"
3573 "Node %d HugePages_Free: %5u\n"
3574 "Node %d HugePages_Surp: %5u\n",
3575 nid, h->nr_huge_pages_node[nid],
3576 nid, h->free_huge_pages_node[nid],
3577 nid, h->surplus_huge_pages_node[nid]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578}
3579
David Rientjes949f7ec2013-04-29 15:07:48 -07003580void hugetlb_show_meminfo(void)
3581{
3582 struct hstate *h;
3583 int nid;
3584
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07003585 if (!hugepages_supported())
3586 return;
3587
David Rientjes949f7ec2013-04-29 15:07:48 -07003588 for_each_node_state(nid, N_MEMORY)
3589 for_each_hstate(h)
3590 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3591 nid,
3592 h->nr_huge_pages_node[nid],
3593 h->free_huge_pages_node[nid],
3594 h->surplus_huge_pages_node[nid],
3595 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3596}
3597
Naoya Horiguchi5d317b22015-11-05 18:47:14 -08003598void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3599{
3600 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3601 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3602}
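/*
 * Worked example for the conversion above: shifting a page count left by
 * (PAGE_SHIFT - 10) multiplies it by PAGE_SIZE / 1024, i.e. it converts
 * base pages to kB. With 4 KiB base pages (PAGE_SHIFT == 12), a
 * hugetlb_usage of 512 pages is reported as 512 << 2 = 2048 kB.
 */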
3603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3605unsigned long hugetlb_total_pages(void)
3606{
Wanpeng Lid0028582013-03-22 15:04:40 -07003607 struct hstate *h;
3608 unsigned long nr_total_pages = 0;
3609
3610 for_each_hstate(h)
3611 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3612 return nr_total_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613}
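/*
 * Worked example for hugetlb_total_pages(): with the default 2 MiB hstate
 * on a typical x86-64 configuration, pages_per_huge_page() is
 * 2 MiB / 4 KiB = 512, so 100 allocated huge pages contribute
 * 100 * 512 = 51200 base pages to the total.
 */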
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614
Andi Kleena5516432008-07-23 21:27:41 -07003615static int hugetlb_acct_memory(struct hstate *h, long delta)
Mel Gormanfc1b8a72008-07-23 21:27:22 -07003616{
3617 int ret = -ENOMEM;
3618
3619 spin_lock(&hugetlb_lock);
3620 /*
3621 * When cpuset is configured, it breaks the strict hugetlb page
3622 * reservation as the accounting is done on a global variable. Such
3623 * reservation is completely rubbish in the presence of cpuset because
3624 * the reservation is not checked against page availability for the
 3625 * current cpuset. The application can still be OOM'ed by the kernel
 3626 * due to a lack of free hugetlb pages in the cpuset the task is in.
 3627 * Attempting to enforce strict accounting with cpuset is almost
 3628 * impossible (or too ugly) because cpuset is so fluid that
 3629 * tasks or memory nodes can be dynamically moved between cpusets.
3630 *
3631 * The change of semantics for shared hugetlb mapping with cpuset is
3632 * undesirable. However, in order to preserve some of the semantics,
3633 * we fall back to check against current free page availability as
3634 * a best attempt and hopefully to minimize the impact of changing
3635 * semantics that cpuset has.
Muchun Song8ca39e62020-08-11 18:30:32 -07003636 *
3637 * Apart from cpuset, we also have memory policy mechanism that
3638 * also determines from which node the kernel will allocate memory
3639 * in a NUMA system. So similar to cpuset, we also should consider
3640 * the memory policy of the current task. Similar to the description
3641 * above.
Mel Gormanfc1b8a72008-07-23 21:27:22 -07003642 */
3643 if (delta > 0) {
Andi Kleena5516432008-07-23 21:27:41 -07003644 if (gather_surplus_pages(h, delta) < 0)
Mel Gormanfc1b8a72008-07-23 21:27:22 -07003645 goto out;
3646
Muchun Song8ca39e62020-08-11 18:30:32 -07003647 if (delta > allowed_mems_nr(h)) {
Andi Kleena5516432008-07-23 21:27:41 -07003648 return_unused_surplus_pages(h, delta);
Mel Gormanfc1b8a72008-07-23 21:27:22 -07003649 goto out;
3650 }
3651 }
3652
3653 ret = 0;
3654 if (delta < 0)
Andi Kleena5516432008-07-23 21:27:41 -07003655 return_unused_surplus_pages(h, (unsigned long) -delta);
Mel Gormanfc1b8a72008-07-23 21:27:22 -07003656
3657out:
3658 spin_unlock(&hugetlb_lock);
3659 return ret;
3660}
3661
Andy Whitcroft84afd992008-07-23 21:27:32 -07003662static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3663{
Joonsoo Kimf522c3a2013-09-11 14:21:53 -07003664 struct resv_map *resv = vma_resv_map(vma);
Andy Whitcroft84afd992008-07-23 21:27:32 -07003665
3666 /*
 3667 * This new VMA should share its sibling's reservation map if present.
3668 * The VMA will only ever have a valid reservation map pointer where
3669 * it is being copied for another still existing VMA. As that VMA
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003670 * has a reference to the reservation map it cannot disappear until
Andy Whitcroft84afd992008-07-23 21:27:32 -07003671 * after this open call completes. It is therefore safe to take a
3672 * new reference here without additional locking.
3673 */
Joonsoo Kim4e35f482014-04-03 14:47:30 -07003674 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
Joonsoo Kimf522c3a2013-09-11 14:21:53 -07003675 kref_get(&resv->refs);
Andy Whitcroft84afd992008-07-23 21:27:32 -07003676}
3677
Mel Gormana1e78772008-07-23 21:27:23 -07003678static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3679{
Andi Kleena5516432008-07-23 21:27:41 -07003680 struct hstate *h = hstate_vma(vma);
Joonsoo Kimf522c3a2013-09-11 14:21:53 -07003681 struct resv_map *resv = vma_resv_map(vma);
David Gibson90481622012-03-21 16:34:12 -07003682 struct hugepage_subpool *spool = subpool_vma(vma);
Joonsoo Kim4e35f482014-04-03 14:47:30 -07003683 unsigned long reserve, start, end;
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07003684 long gbl_reserve;
Andy Whitcroft84afd992008-07-23 21:27:32 -07003685
Joonsoo Kim4e35f482014-04-03 14:47:30 -07003686 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3687 return;
Andy Whitcroft84afd992008-07-23 21:27:32 -07003688
Joonsoo Kim4e35f482014-04-03 14:47:30 -07003689 start = vma_hugecache_offset(h, vma, vma->vm_start);
3690 end = vma_hugecache_offset(h, vma, vma->vm_end);
Andy Whitcroft84afd992008-07-23 21:27:32 -07003691
Joonsoo Kim4e35f482014-04-03 14:47:30 -07003692 reserve = (end - start) - region_count(resv, start, end);
Mina Almasrye9fe92a2020-04-01 21:11:21 -07003693 hugetlb_cgroup_uncharge_counter(resv, start, end);
Joonsoo Kim4e35f482014-04-03 14:47:30 -07003694 if (reserve) {
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07003695 /*
3696 * Decrement reserve counts. The global reserve count may be
3697 * adjusted if the subpool has a minimum size.
3698 */
3699 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3700 hugetlb_acct_memory(h, -gbl_reserve);
Andy Whitcroft84afd992008-07-23 21:27:32 -07003701 }
Mina Almasrye9fe92a2020-04-01 21:11:21 -07003702
3703 kref_put(&resv->refs, resv_map_release);
Mel Gormana1e78772008-07-23 21:27:23 -07003704}
3705
Dan Williams31383c62017-11-29 16:10:28 -08003706static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3707{
3708 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3709 return -EINVAL;
3710 return 0;
3711}
3712
Dan Williams05ea8862018-04-05 16:24:25 -07003713static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3714{
3715 struct hstate *hstate = hstate_vma(vma);
3716
3717 return 1UL << huge_page_shift(hstate);
3718}
3719
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720/*
3721 * We cannot handle pagefaults against hugetlb pages at all. They cause
3722 * handle_mm_fault() to try to instantiate regular-sized pages in the
 3723 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3724 * this far.
3725 */
Souptick Joarderb3ec9f32018-06-07 17:08:04 -07003726static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727{
3728 BUG();
Nick Piggind0217ac2007-07-19 01:47:03 -07003729 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730}
3731
Jane Chueec36362018-08-02 15:36:05 -07003732/*
3733 * When a new function is introduced to vm_operations_struct and added
3734 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3735 * This is because under System V memory model, mappings created via
3736 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3737 * their original vm_ops are overwritten with shm_vm_ops.
3738 */
Alexey Dobriyanf0f37e2f2009-09-27 22:29:37 +04003739const struct vm_operations_struct hugetlb_vm_ops = {
Nick Piggind0217ac2007-07-19 01:47:03 -07003740 .fault = hugetlb_vm_op_fault,
Andy Whitcroft84afd992008-07-23 21:27:32 -07003741 .open = hugetlb_vm_op_open,
Mel Gormana1e78772008-07-23 21:27:23 -07003742 .close = hugetlb_vm_op_close,
Dan Williams31383c62017-11-29 16:10:28 -08003743 .split = hugetlb_vm_op_split,
Dan Williams05ea8862018-04-05 16:24:25 -07003744 .pagesize = hugetlb_vm_op_pagesize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745};
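/*
 * Illustrative userspace sketch (not part of this file): a VMA backed by
 * these vm_ops can be created with mmap(MAP_HUGETLB), assuming huge pages
 * are configured and the length is a multiple of the default huge page
 * size (2 MiB below):
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2UL << 20;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 *		if (p == MAP_FAILED) {
 *			perror("mmap");
 *			return 1;
 *		}
 *		p[0] = 1;		// first touch faults in a huge page
 *		return munmap(p, len);
 *	}
 */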
3746
David Gibson1e8f8892006-01-06 00:10:44 -08003747static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3748 int writable)
David Gibson63551ae2005-06-21 17:14:44 -07003749{
3750 pte_t entry;
3751
David Gibson1e8f8892006-01-06 00:10:44 -08003752 if (writable) {
Gerald Schaefer106c9922013-04-29 15:07:23 -07003753 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3754 vma->vm_page_prot)));
David Gibson63551ae2005-06-21 17:14:44 -07003755 } else {
Gerald Schaefer106c9922013-04-29 15:07:23 -07003756 entry = huge_pte_wrprotect(mk_huge_pte(page,
3757 vma->vm_page_prot));
David Gibson63551ae2005-06-21 17:14:44 -07003758 }
3759 entry = pte_mkyoung(entry);
3760 entry = pte_mkhuge(entry);
Chris Metcalfd9ed9fa2012-04-01 14:01:34 -04003761 entry = arch_make_huge_pte(entry, vma, page, writable);
David Gibson63551ae2005-06-21 17:14:44 -07003762
3763 return entry;
3764}
3765
David Gibson1e8f8892006-01-06 00:10:44 -08003766static void set_huge_ptep_writable(struct vm_area_struct *vma,
3767 unsigned long address, pte_t *ptep)
3768{
3769 pte_t entry;
3770
Gerald Schaefer106c9922013-04-29 15:07:23 -07003771 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
Chris Forbes32f84522011-07-25 17:12:14 -07003772 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
Russell King4b3073e2009-12-18 16:40:18 +00003773 update_mmu_cache(vma, address, ptep);
David Gibson1e8f8892006-01-06 00:10:44 -08003774}
3775
Aneesh Kumar K.Vd5ed7442017-07-06 15:38:47 -07003776bool is_hugetlb_entry_migration(pte_t pte)
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003777{
3778 swp_entry_t swp;
3779
3780 if (huge_pte_none(pte) || pte_present(pte))
Aneesh Kumar K.Vd5ed7442017-07-06 15:38:47 -07003781 return false;
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003782 swp = pte_to_swp_entry(pte);
Baoquan Hed79d1762020-10-13 16:56:14 -07003783 if (is_migration_entry(swp))
Aneesh Kumar K.Vd5ed7442017-07-06 15:38:47 -07003784 return true;
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003785 else
Aneesh Kumar K.Vd5ed7442017-07-06 15:38:47 -07003786 return false;
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003787}
3788
Baoquan He3e5c3602020-10-13 16:56:10 -07003789static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003790{
3791 swp_entry_t swp;
3792
3793 if (huge_pte_none(pte) || pte_present(pte))
Baoquan He3e5c3602020-10-13 16:56:10 -07003794 return false;
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003795 swp = pte_to_swp_entry(pte);
Baoquan Hed79d1762020-10-13 16:56:14 -07003796 if (is_hwpoison_entry(swp))
Baoquan He3e5c3602020-10-13 16:56:10 -07003797 return true;
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003798 else
Baoquan He3e5c3602020-10-13 16:56:10 -07003799 return false;
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003800}
David Gibson1e8f8892006-01-06 00:10:44 -08003801
David Gibson63551ae2005-06-21 17:14:44 -07003802int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3803 struct vm_area_struct *vma)
3804{
Mike Kravetz5e415402018-11-16 15:08:04 -08003805 pte_t *src_pte, *dst_pte, entry, dst_entry;
David Gibson63551ae2005-06-21 17:14:44 -07003806 struct page *ptepage;
Hugh Dickins1c598272005-10-19 21:23:43 -07003807 unsigned long addr;
David Gibson1e8f8892006-01-06 00:10:44 -08003808 int cow;
Andi Kleena5516432008-07-23 21:27:41 -07003809 struct hstate *h = hstate_vma(vma);
3810 unsigned long sz = huge_page_size(h);
Mike Kravetzc0d03812020-04-01 21:11:05 -07003811 struct address_space *mapping = vma->vm_file->f_mapping;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003812 struct mmu_notifier_range range;
Andreas Sandberge8569dd2014-01-21 15:49:09 -08003813 int ret = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08003814
3815 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
David Gibson63551ae2005-06-21 17:14:44 -07003816
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003817 if (cow) {
Jérôme Glisse7269f992019-05-13 17:20:53 -07003818 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07003819 vma->vm_start,
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003820 vma->vm_end);
3821 mmu_notifier_invalidate_range_start(&range);
Mike Kravetzc0d03812020-04-01 21:11:05 -07003822 } else {
3823 /*
3824 * For shared mappings i_mmap_rwsem must be held to call
3825 * huge_pte_alloc, otherwise the returned ptep could go
3826 * away if part of a shared pmd and another thread calls
3827 * huge_pmd_unshare.
3828 */
3829 i_mmap_lock_read(mapping);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003830 }
Andreas Sandberge8569dd2014-01-21 15:49:09 -08003831
Andi Kleena5516432008-07-23 21:27:41 -07003832 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08003833 spinlock_t *src_ptl, *dst_ptl;
Punit Agrawal7868a202017-07-06 15:39:42 -07003834 src_pte = huge_pte_offset(src, addr, sz);
Hugh Dickinsc74df322005-10-29 18:16:23 -07003835 if (!src_pte)
3836 continue;
Andi Kleena5516432008-07-23 21:27:41 -07003837 dst_pte = huge_pte_alloc(dst, addr, sz);
Andreas Sandberge8569dd2014-01-21 15:49:09 -08003838 if (!dst_pte) {
3839 ret = -ENOMEM;
3840 break;
3841 }
Larry Woodmanc5c99422008-01-24 05:49:25 -08003842
Mike Kravetz5e415402018-11-16 15:08:04 -08003843 /*
3844 * If the pagetables are shared don't copy or take references.
3845 * dst_pte == src_pte is the common case of src/dest sharing.
3846 *
3847 * However, src could have 'unshared' and dst shares with
3848 * another vma. If dst_pte !none, this implies sharing.
3849 * Check here before taking page table lock, and once again
3850 * after taking the lock below.
3851 */
3852 dst_entry = huge_ptep_get(dst_pte);
3853 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
Larry Woodmanc5c99422008-01-24 05:49:25 -08003854 continue;
3855
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08003856 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3857 src_ptl = huge_pte_lockptr(h, src, src_pte);
3858 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003859 entry = huge_ptep_get(src_pte);
Mike Kravetz5e415402018-11-16 15:08:04 -08003860 dst_entry = huge_ptep_get(dst_pte);
3861 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3862 /*
3863 * Skip if src entry none. Also, skip in the
3864 * unlikely case dst entry !none as this implies
3865 * sharing with another vma.
3866 */
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003867 ;
3868 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3869 is_hugetlb_entry_hwpoisoned(entry))) {
3870 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3871
3872 if (is_write_migration_entry(swp_entry) && cow) {
3873 /*
3874 * COW mappings require pages in both
3875 * parent and child to be set to read.
3876 */
3877 make_migration_entry_read(&swp_entry);
3878 entry = swp_entry_to_pte(swp_entry);
Punit Agrawale5251fd2017-07-06 15:39:50 -07003879 set_huge_swap_pte_at(src, addr, src_pte,
3880 entry, sz);
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003881 }
Punit Agrawale5251fd2017-07-06 15:39:50 -07003882 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
Naoya Horiguchi4a705fe2014-06-23 13:22:03 -07003883 } else {
Joerg Roedel34ee6452014-11-13 13:46:09 +11003884 if (cow) {
Jérôme Glisse0f108512017-11-15 17:34:07 -08003885 /*
3886 * No need to notify as we are downgrading page
3887 * table protection not changing it to point
3888 * to a new page.
3889 *
Mike Rapoportad56b732018-03-21 21:22:47 +02003890 * See Documentation/vm/mmu_notifier.rst
Jérôme Glisse0f108512017-11-15 17:34:07 -08003891 */
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07003892 huge_ptep_set_wrprotect(src, addr, src_pte);
Joerg Roedel34ee6452014-11-13 13:46:09 +11003893 }
Naoya Horiguchi0253d632014-07-23 14:00:19 -07003894 entry = huge_ptep_get(src_pte);
Hugh Dickins1c598272005-10-19 21:23:43 -07003895 ptepage = pte_page(entry);
3896 get_page(ptepage);
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08003897 page_dup_rmap(ptepage, true);
Hugh Dickins1c598272005-10-19 21:23:43 -07003898 set_huge_pte_at(dst, addr, dst_pte, entry);
Naoya Horiguchi5d317b22015-11-05 18:47:14 -08003899 hugetlb_count_add(pages_per_huge_page(h), dst);
Hugh Dickins1c598272005-10-19 21:23:43 -07003900 }
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08003901 spin_unlock(src_ptl);
3902 spin_unlock(dst_ptl);
David Gibson63551ae2005-06-21 17:14:44 -07003903 }
David Gibson63551ae2005-06-21 17:14:44 -07003904
Andreas Sandberge8569dd2014-01-21 15:49:09 -08003905 if (cow)
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003906 mmu_notifier_invalidate_range_end(&range);
Mike Kravetzc0d03812020-04-01 21:11:05 -07003907 else
3908 i_mmap_unlock_read(mapping);
Andreas Sandberge8569dd2014-01-21 15:49:09 -08003909
3910 return ret;
David Gibson63551ae2005-06-21 17:14:44 -07003911}
3912
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07003913void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3914 unsigned long start, unsigned long end,
3915 struct page *ref_page)
David Gibson63551ae2005-06-21 17:14:44 -07003916{
3917 struct mm_struct *mm = vma->vm_mm;
3918 unsigned long address;
David Gibsonc7546f82005-08-05 11:59:35 -07003919 pte_t *ptep;
David Gibson63551ae2005-06-21 17:14:44 -07003920 pte_t pte;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08003921 spinlock_t *ptl;
David Gibson63551ae2005-06-21 17:14:44 -07003922 struct page *page;
Andi Kleena5516432008-07-23 21:27:41 -07003923 struct hstate *h = hstate_vma(vma);
3924 unsigned long sz = huge_page_size(h);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003925 struct mmu_notifier_range range;
Andi Kleena5516432008-07-23 21:27:41 -07003926
David Gibson63551ae2005-06-21 17:14:44 -07003927 WARN_ON(!is_vm_hugetlb_page(vma));
Andi Kleena5516432008-07-23 21:27:41 -07003928 BUG_ON(start & ~huge_page_mask(h));
3929 BUG_ON(end & ~huge_page_mask(h));
David Gibson63551ae2005-06-21 17:14:44 -07003930
Aneesh Kumar K.V07e32662016-12-12 16:42:40 -08003931 /*
 3932 * This is a hugetlb vma; all the pte entries should point
 3933 * to huge pages.
3934 */
Peter Zijlstraed6a7932018-08-31 14:46:08 +02003935 tlb_change_page_size(tlb, sz);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07003936 tlb_start_vma(tlb, vma);
Mike Kravetzdff11ab2018-10-05 15:51:33 -07003937
3938 /*
3939 * If sharing possible, alert mmu notifiers of worst case.
3940 */
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07003941 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3942 end);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08003943 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3944 mmu_notifier_invalidate_range_start(&range);
Hillf Danton569f48b82014-12-10 15:44:41 -08003945 address = start;
Hillf Danton569f48b82014-12-10 15:44:41 -08003946 for (; address < end; address += sz) {
Punit Agrawal7868a202017-07-06 15:39:42 -07003947 ptep = huge_pte_offset(mm, address, sz);
Adam Litke4c887262005-10-29 18:16:46 -07003948 if (!ptep)
David Gibsonc7546f82005-08-05 11:59:35 -07003949 continue;
3950
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08003951 ptl = huge_pte_lock(h, mm, ptep);
Mike Kravetz34ae2042020-08-11 18:31:38 -07003952 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07003953 spin_unlock(ptl);
Mike Kravetzdff11ab2018-10-05 15:51:33 -07003954 /*
3955 * We just unmapped a page of PMDs by clearing a PUD.
3956 * The caller's TLB flush range should cover this area.
3957 */
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07003958 continue;
3959 }
Chen, Kenneth W39dde652006-12-06 20:32:03 -08003960
Hillf Danton66293262012-03-23 15:01:48 -07003961 pte = huge_ptep_get(ptep);
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07003962 if (huge_pte_none(pte)) {
3963 spin_unlock(ptl);
3964 continue;
3965 }
Hillf Danton66293262012-03-23 15:01:48 -07003966
3967 /*
Naoya Horiguchi9fbc1f62015-02-11 15:25:32 -08003968 * Migrating hugepage or HWPoisoned hugepage is already
3969 * unmapped and its refcount is dropped, so just clear pte here.
Hillf Danton66293262012-03-23 15:01:48 -07003970 */
Naoya Horiguchi9fbc1f62015-02-11 15:25:32 -08003971 if (unlikely(!pte_present(pte))) {
Punit Agrawal9386fac2017-07-06 15:39:46 -07003972 huge_pte_clear(mm, address, ptep, sz);
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07003973 spin_unlock(ptl);
3974 continue;
Naoya Horiguchi8c4894c2012-12-12 13:52:28 -08003975 }
Hillf Danton66293262012-03-23 15:01:48 -07003976
3977 page = pte_page(pte);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07003978 /*
3979 * If a reference page is supplied, it is because a specific
3980 * page is being unmapped, not a range. Ensure the page we
3981 * are about to unmap is the actual page of interest.
3982 */
3983 if (ref_page) {
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07003984 if (page != ref_page) {
3985 spin_unlock(ptl);
3986 continue;
3987 }
Mel Gorman04f2cbe2008-07-23 21:27:25 -07003988 /*
3989 * Mark the VMA as having unmapped its page so that
3990 * future faults in this VMA will fail rather than
3991 * looking like data was lost
3992 */
3993 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3994 }
3995
David Gibsonc7546f82005-08-05 11:59:35 -07003996 pte = huge_ptep_get_and_clear(mm, address, ptep);
Aneesh Kumar K.Vb528e4b2016-12-12 16:42:37 -08003997 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
Gerald Schaefer106c9922013-04-29 15:07:23 -07003998 if (huge_pte_dirty(pte))
Ken Chen6649a382007-02-08 14:20:27 -08003999 set_page_dirty(page);
Hillf Danton9e811302012-03-21 16:34:03 -07004000
Naoya Horiguchi5d317b22015-11-05 18:47:14 -08004001 hugetlb_count_sub(pages_per_huge_page(h), mm);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08004002 page_remove_rmap(page, true);
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07004003
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004004 spin_unlock(ptl);
Aneesh Kumar K.Ve77b0852016-07-26 15:24:12 -07004005 tlb_remove_page_size(tlb, page, huge_page_size(h));
Aneesh Kumar K.V31d49da2016-07-26 15:24:06 -07004006 /*
4007 * Bail out after unmapping reference page if supplied
4008 */
4009 if (ref_page)
4010 break;
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07004011 }
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004012 mmu_notifier_invalidate_range_end(&range);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07004013 tlb_end_vma(tlb, vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014}
David Gibson63551ae2005-06-21 17:14:44 -07004015
Mel Gormand8333522012-07-31 16:46:20 -07004016void __unmap_hugepage_range_final(struct mmu_gather *tlb,
4017 struct vm_area_struct *vma, unsigned long start,
4018 unsigned long end, struct page *ref_page)
4019{
4020 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
4021
4022 /*
4023 * Clear this flag so that x86's huge_pmd_share page_table_shareable
4024 * test will fail on a vma being torn down, and not grab a page table
4025 * on its way out. We're lucky that the flag has such an appropriate
4026 * name, and can in fact be safely cleared here. We could clear it
4027 * before the __unmap_hugepage_range above, but all that's necessary
Davidlohr Buesoc8c06ef2014-12-12 16:54:24 -08004028 * is to clear it before releasing the i_mmap_rwsem. This works
Mel Gormand8333522012-07-31 16:46:20 -07004029 * because in the context this is called, the VMA is about to be
Davidlohr Buesoc8c06ef2014-12-12 16:54:24 -08004030 * destroyed and the i_mmap_rwsem is held.
Mel Gormand8333522012-07-31 16:46:20 -07004031 */
4032 vma->vm_flags &= ~VM_MAYSHARE;
4033}
4034
Chen, Kenneth W502717f2006-10-11 01:20:46 -07004035void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004036 unsigned long end, struct page *ref_page)
Chen, Kenneth W502717f2006-10-11 01:20:46 -07004037{
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07004038 struct mm_struct *mm;
4039 struct mmu_gather tlb;
Mike Kravetzdff11ab2018-10-05 15:51:33 -07004040 unsigned long tlb_start = start;
4041 unsigned long tlb_end = end;
4042
4043 /*
4044 * If shared PMDs were possibly used within this vma range, adjust
4045 * start/end for worst case tlb flushing.
4046 * Note that we can not be sure if PMDs are shared until we try to
4047 * unmap pages. However, we want to make sure TLB flushing covers
4048 * the largest possible range.
4049 */
4050 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07004051
4052 mm = vma->vm_mm;
4053
Mike Kravetzdff11ab2018-10-05 15:51:33 -07004054 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07004055 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
Mike Kravetzdff11ab2018-10-05 15:51:33 -07004056 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
Chen, Kenneth W502717f2006-10-11 01:20:46 -07004057}
4058
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004059/*
4060 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 4061 * mapping it owns the reserve page for. The intention is to unmap the page
4062 * from other VMAs and let the children be SIGKILLed if they are faulting the
4063 * same region.
4064 */
Davidlohr Bueso2f4612a2014-08-06 16:06:45 -07004065static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4066 struct page *page, unsigned long address)
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004067{
Adam Litke75266742008-11-12 13:24:56 -08004068 struct hstate *h = hstate_vma(vma);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004069 struct vm_area_struct *iter_vma;
4070 struct address_space *mapping;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004071 pgoff_t pgoff;
4072
4073 /*
4074 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
4075 * from page cache lookup which is in HPAGE_SIZE units.
4076 */
Adam Litke75266742008-11-12 13:24:56 -08004077 address = address & huge_page_mask(h);
Michal Hocko36e4f202012-10-08 16:33:31 -07004078 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4079 vma->vm_pgoff;
Al Viro93c76a32015-12-04 23:45:44 -05004080 mapping = vma->vm_file->f_mapping;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004081
Mel Gorman4eb2b1d2009-12-14 17:59:53 -08004082 /*
4083 * Take the mapping lock for the duration of the table walk. As
4084 * this mapping should be shared between all the VMAs,
 4085 * __unmap_hugepage_range() is called with the lock already held.
4086 */
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08004087 i_mmap_lock_write(mapping);
Michel Lespinasse6b2dbba2012-10-08 16:31:25 -07004088 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004089 /* Do not unmap the current VMA */
4090 if (iter_vma == vma)
4091 continue;
4092
4093 /*
Mel Gorman2f84a892015-10-01 15:36:57 -07004094 * Shared VMAs have their own reserves and do not affect
4095 * MAP_PRIVATE accounting but it is possible that a shared
4096 * VMA is using the same page so check and skip such VMAs.
4097 */
4098 if (iter_vma->vm_flags & VM_MAYSHARE)
4099 continue;
4100
4101 /*
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004102 * Unmap the page from other VMAs without their own reserves.
4103 * They get marked to be SIGKILLed if they fault in these
4104 * areas. This is because a future no-page fault on this VMA
4105 * could insert a zeroed page instead of the data existing
 4106 * from the time of fork. This would look like data corruption.
4107 */
4108 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -07004109 unmap_hugepage_range(iter_vma, address,
4110 address + huge_page_size(h), page);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004111 }
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08004112 i_mmap_unlock_write(mapping);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004113}
4114
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004115/*
4116 * Hugetlb_cow() should be called with page lock of the original hugepage held.
Michal Hockoef009b22012-01-10 15:07:21 -08004117 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
4118 * cannot race with other handlers or page migration.
4119 * Keep the pte_same checks anyway to make transition from the mutex easier.
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004120 */
Souptick Joarder2b740302018-08-23 17:01:36 -07004121static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
Huang Ying974e6d62018-08-17 15:45:57 -07004122 unsigned long address, pte_t *ptep,
Aneesh Kumar K.V3999f522016-12-12 16:41:56 -08004123 struct page *pagecache_page, spinlock_t *ptl)
David Gibson1e8f8892006-01-06 00:10:44 -08004124{
Aneesh Kumar K.V3999f522016-12-12 16:41:56 -08004125 pte_t pte;
Andi Kleena5516432008-07-23 21:27:41 -07004126 struct hstate *h = hstate_vma(vma);
David Gibson1e8f8892006-01-06 00:10:44 -08004127 struct page *old_page, *new_page;
Souptick Joarder2b740302018-08-23 17:01:36 -07004128 int outside_reserve = 0;
4129 vm_fault_t ret = 0;
Huang Ying974e6d62018-08-17 15:45:57 -07004130 unsigned long haddr = address & huge_page_mask(h);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004131 struct mmu_notifier_range range;
David Gibson1e8f8892006-01-06 00:10:44 -08004132
Aneesh Kumar K.V3999f522016-12-12 16:41:56 -08004133 pte = huge_ptep_get(ptep);
David Gibson1e8f8892006-01-06 00:10:44 -08004134 old_page = pte_page(pte);
4135
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004136retry_avoidcopy:
David Gibson1e8f8892006-01-06 00:10:44 -08004137 /* If no-one else is actually using this page, avoid the copy
4138 * and just make the page writable */
Joonsoo Kim37a21402013-09-11 14:21:04 -07004139 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
Hugh Dickins5a499732016-07-14 12:07:38 -07004140 page_move_anon_rmap(old_page, vma);
Huang Ying5b7a1d42018-08-17 15:45:53 -07004141 set_huge_ptep_writable(vma, haddr, ptep);
Nick Piggin83c54072007-07-19 01:47:05 -07004142 return 0;
David Gibson1e8f8892006-01-06 00:10:44 -08004143 }
4144
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004145 /*
4146 * If the process that created a MAP_PRIVATE mapping is about to
4147 * perform a COW due to a shared page count, attempt to satisfy
4148 * the allocation without using the existing reserves. The pagecache
4149 * page is used to determine if the reserve at this address was
4150 * consumed or not. If reserves were used, a partial faulted mapping
4151 * at the time of fork() could consume its reserves on COW instead
4152 * of the full address range.
4153 */
Joonsoo Kim5944d012013-09-11 14:21:55 -07004154 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004155 old_page != pagecache_page)
4156 outside_reserve = 1;
4157
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004158 get_page(old_page);
Larry Woodmanb76c8cf2009-12-14 17:59:37 -08004159
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004160 /*
4161 * Drop page table lock as buddy allocator may be called. It will
4162 * be acquired again before returning to the caller, as expected.
4163 */
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004164 spin_unlock(ptl);
Huang Ying5b7a1d42018-08-17 15:45:53 -07004165 new_page = alloc_huge_page(vma, haddr, outside_reserve);
David Gibson1e8f8892006-01-06 00:10:44 -08004166
Adam Litke2fc39ce2007-11-14 16:59:39 -08004167 if (IS_ERR(new_page)) {
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004168 /*
4169 * If a process owning a MAP_PRIVATE mapping fails to COW,
4170 * it is due to references held by a child and an insufficient
 4171 * huge page pool. To guarantee the original mapper's
4172 * reliability, unmap the page from child processes. The child
4173 * may get SIGKILLed if it later faults.
4174 */
4175 if (outside_reserve) {
Mike Kravetzdf73c802020-12-29 15:14:25 -08004176 struct address_space *mapping = vma->vm_file->f_mapping;
4177 pgoff_t idx;
4178 u32 hash;
4179
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004180 put_page(old_page);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004181 BUG_ON(huge_pte_none(pte));
Mike Kravetzdf73c802020-12-29 15:14:25 -08004182 /*
4183 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
4184 * unmapping. unmapping needs to hold i_mmap_rwsem
4185 * in write mode. Dropping i_mmap_rwsem in read mode
4186 * here is OK as COW mappings do not interact with
4187 * PMD sharing.
4188 *
4189 * Reacquire both after unmap operation.
4190 */
4191 idx = vma_hugecache_offset(h, vma, haddr);
4192 hash = hugetlb_fault_mutex_hash(mapping, idx);
4193 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4194 i_mmap_unlock_read(mapping);
4195
Huang Ying5b7a1d42018-08-17 15:45:53 -07004196 unmap_ref_private(mm, vma, old_page, haddr);
Mike Kravetzdf73c802020-12-29 15:14:25 -08004197
4198 i_mmap_lock_read(mapping);
4199 mutex_lock(&hugetlb_fault_mutex_table[hash]);
Davidlohr Bueso2f4612a2014-08-06 16:06:45 -07004200 spin_lock(ptl);
Huang Ying5b7a1d42018-08-17 15:45:53 -07004201 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
Davidlohr Bueso2f4612a2014-08-06 16:06:45 -07004202 if (likely(ptep &&
4203 pte_same(huge_ptep_get(ptep), pte)))
4204 goto retry_avoidcopy;
4205 /*
4206 * race occurs while re-acquiring page table
4207 * lock, and our job is done.
4208 */
4209 return 0;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004210 }
4211
Souptick Joarder2b740302018-08-23 17:01:36 -07004212 ret = vmf_error(PTR_ERR(new_page));
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004213 goto out_release_old;
David Gibson1e8f8892006-01-06 00:10:44 -08004214 }
4215
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004216 /*
 4217 * When the original hugepage is a shared one, it does not have
4218 * anon_vma prepared.
4219 */
Dean Nelson44e2aa92010-10-26 14:22:08 -07004220 if (unlikely(anon_vma_prepare(vma))) {
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004221 ret = VM_FAULT_OOM;
4222 goto out_release_all;
Dean Nelson44e2aa92010-10-26 14:22:08 -07004223 }
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004224
Huang Ying974e6d62018-08-17 15:45:57 -07004225 copy_user_huge_page(new_page, old_page, address, vma,
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08004226 pages_per_huge_page(h));
Nick Piggin0ed361d2008-02-04 22:29:34 -08004227 __SetPageUptodate(new_page);
David Gibson1e8f8892006-01-06 00:10:44 -08004228
Jérôme Glisse7269f992019-05-13 17:20:53 -07004229 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
Jérôme Glisse6f4f13e2019-05-13 17:20:49 -07004230 haddr + huge_page_size(h));
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004231 mmu_notifier_invalidate_range_start(&range);
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004232
Larry Woodmanb76c8cf2009-12-14 17:59:37 -08004233 /*
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004234 * Retake the page table lock to check for racing updates
Larry Woodmanb76c8cf2009-12-14 17:59:37 -08004235 * before the page tables are altered
4236 */
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004237 spin_lock(ptl);
Huang Ying5b7a1d42018-08-17 15:45:53 -07004238 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
Naoya Horiguchia9af0c52014-04-07 15:36:54 -07004239 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
Joonsoo Kim07443a82013-09-11 14:21:58 -07004240 ClearPagePrivate(new_page);
4241
David Gibson1e8f8892006-01-06 00:10:44 -08004242 /* Break COW */
Huang Ying5b7a1d42018-08-17 15:45:53 -07004243 huge_ptep_clear_flush(vma, haddr, ptep);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004244 mmu_notifier_invalidate_range(mm, range.start, range.end);
Huang Ying5b7a1d42018-08-17 15:45:53 -07004245 set_huge_pte_at(mm, haddr, ptep,
David Gibson1e8f8892006-01-06 00:10:44 -08004246 make_huge_pte(vma, new_page, 1));
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08004247 page_remove_rmap(old_page, true);
Huang Ying5b7a1d42018-08-17 15:45:53 -07004248 hugepage_add_new_anon_rmap(new_page, vma, haddr);
Mike Kravetzcb6acd02019-02-28 16:22:02 -08004249 set_page_huge_active(new_page);
David Gibson1e8f8892006-01-06 00:10:44 -08004250 /* Make the old page be freed below */
4251 new_page = old_page;
4252 }
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004253 spin_unlock(ptl);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08004254 mmu_notifier_invalidate_range_end(&range);
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004255out_release_all:
Huang Ying5b7a1d42018-08-17 15:45:53 -07004256 restore_reserve_on_error(h, vma, haddr, new_page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004257 put_page(new_page);
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004258out_release_old:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004259 put_page(old_page);
Joonsoo Kim83120342013-09-11 14:21:57 -07004260
Davidlohr Buesoad4404a2014-08-06 16:06:47 -07004261 spin_lock(ptl); /* Caller expects lock to be held */
4262 return ret;
David Gibson1e8f8892006-01-06 00:10:44 -08004263}
4264
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004265/* Return the pagecache page at a given address within a VMA */
Andi Kleena5516432008-07-23 21:27:41 -07004266static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4267 struct vm_area_struct *vma, unsigned long address)
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004268{
4269 struct address_space *mapping;
Andy Whitcrofte7c4b0b2008-07-23 21:27:26 -07004270 pgoff_t idx;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004271
4272 mapping = vma->vm_file->f_mapping;
Andi Kleena5516432008-07-23 21:27:41 -07004273 idx = vma_hugecache_offset(h, vma, address);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004274
4275 return find_lock_page(mapping, idx);
4276}
4277
Hugh Dickins3ae77f42009-09-21 17:03:33 -07004278/*
4279 * Return whether there is a pagecache page to back given address within VMA.
4280 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
4281 */
4282static bool hugetlbfs_pagecache_present(struct hstate *h,
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004283 struct vm_area_struct *vma, unsigned long address)
4284{
4285 struct address_space *mapping;
4286 pgoff_t idx;
4287 struct page *page;
4288
4289 mapping = vma->vm_file->f_mapping;
4290 idx = vma_hugecache_offset(h, vma, address);
4291
4292 page = find_get_page(mapping, idx);
4293 if (page)
4294 put_page(page);
4295 return page != NULL;
4296}
4297
Mike Kravetzab76ad52015-09-08 15:01:50 -07004298int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4299 pgoff_t idx)
4300{
4301 struct inode *inode = mapping->host;
4302 struct hstate *h = hstate_inode(inode);
4303 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4304
4305 if (err)
4306 return err;
4307 ClearPagePrivate(page);
4308
Mike Kravetz22146c32018-10-26 15:10:58 -07004309 /*
4310 * set page dirty so that it will not be removed from cache/file
4311 * by non-hugetlbfs specific code paths.
4312 */
4313 set_page_dirty(page);
4314
Mike Kravetzab76ad52015-09-08 15:01:50 -07004315 spin_lock(&inode->i_lock);
4316 inode->i_blocks += blocks_per_huge_page(h);
4317 spin_unlock(&inode->i_lock);
4318 return 0;
4319}
4320
Souptick Joarder2b740302018-08-23 17:01:36 -07004321static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4322 struct vm_area_struct *vma,
4323 struct address_space *mapping, pgoff_t idx,
4324 unsigned long address, pte_t *ptep, unsigned int flags)
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01004325{
Andi Kleena5516432008-07-23 21:27:41 -07004326 struct hstate *h = hstate_vma(vma);
Souptick Joarder2b740302018-08-23 17:01:36 -07004327 vm_fault_t ret = VM_FAULT_SIGBUS;
Hillf Danton409eb8c2012-01-20 14:34:13 -08004328 int anon_rmap = 0;
Adam Litke4c887262005-10-29 18:16:46 -07004329 unsigned long size;
Adam Litke4c887262005-10-29 18:16:46 -07004330 struct page *page;
David Gibson1e8f8892006-01-06 00:10:44 -08004331 pte_t new_pte;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004332 spinlock_t *ptl;
Huang Ying285b8dc2018-06-07 17:08:08 -07004333 unsigned long haddr = address & huge_page_mask(h);
Mike Kravetzcb6acd02019-02-28 16:22:02 -08004334 bool new_page = false;
Adam Litke4c887262005-10-29 18:16:46 -07004335
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004336 /*
4337 * Currently, we are forced to kill the process in the event the
4338 * original mapper has unmapped pages from the child due to a failed
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004339 * COW. Warn that such a situation has occurred as it may not be obvious
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004340 */
4341 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
Geoffrey Thomas910154d2016-03-09 14:08:04 -08004342 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
Andrew Mortonffb22af2013-02-22 16:32:08 -08004343 current->pid);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07004344 return ret;
4345 }
4346
Adam Litke4c887262005-10-29 18:16:46 -07004347 /*
Mike Kravetz87bf91d2020-04-01 21:11:08 -07004348 * We can not race with truncation due to holding i_mmap_rwsem.
4349 * i_size is modified when holding i_mmap_rwsem, so check here
4350 * once for faults beyond end of file.
Adam Litke4c887262005-10-29 18:16:46 -07004351 */
Mike Kravetz87bf91d2020-04-01 21:11:08 -07004352 size = i_size_read(mapping->host) >> huge_page_shift(h);
4353 if (idx >= size)
4354 goto out;
4355
Christoph Lameter6bda6662006-01-06 00:10:49 -08004356retry:
4357 page = find_lock_page(mapping, idx);
4358 if (!page) {
Mike Kravetz1a1aad82017-02-22 15:43:01 -08004359 /*
4360 * Check for page in userfault range
4361 */
4362 if (userfaultfd_missing(vma)) {
4363 u32 hash;
4364 struct vm_fault vmf = {
4365 .vma = vma,
Huang Ying285b8dc2018-06-07 17:08:08 -07004366 .address = haddr,
Mike Kravetz1a1aad82017-02-22 15:43:01 -08004367 .flags = flags,
4368 /*
4369 * Hard to debug if it ends up being
4370 * used by a callee that assumes
4371 * something about the other
4372 * uninitialized fields... same as in
4373 * memory.c
4374 */
4375 };
4376
4377 /*
Mike Kravetzc0d03812020-04-01 21:11:05 -07004378 * hugetlb_fault_mutex and i_mmap_rwsem must be
4379 * dropped before handling userfault. Reacquire
4380 * after handling fault to make calling code simpler.
Mike Kravetz1a1aad82017-02-22 15:43:01 -08004381 */
Wei Yang188b04a2019-11-30 17:57:02 -08004382 hash = hugetlb_fault_mutex_hash(mapping, idx);
Mike Kravetz1a1aad82017-02-22 15:43:01 -08004383 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
Mike Kravetzc0d03812020-04-01 21:11:05 -07004384 i_mmap_unlock_read(mapping);
Mike Kravetz1a1aad82017-02-22 15:43:01 -08004385 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
Mike Kravetzc0d03812020-04-01 21:11:05 -07004386 i_mmap_lock_read(mapping);
Mike Kravetz1a1aad82017-02-22 15:43:01 -08004387 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4388 goto out;
4389 }
4390
Huang Ying285b8dc2018-06-07 17:08:08 -07004391 page = alloc_huge_page(vma, haddr, 0);
Adam Litke2fc39ce2007-11-14 16:59:39 -08004392 if (IS_ERR(page)) {
Mike Kravetz4643d672019-08-13 15:38:00 -07004393 /*
4394 * Returning error will result in faulting task being
4395 * sent SIGBUS. The hugetlb fault mutex prevents two
4396 * tasks from racing to fault in the same page which
4397 * could result in false unable to allocate errors.
4398 * Page migration does not take the fault mutex, but
4399 * does a clear then write of pte's under page table
4400 * lock. Page fault code could race with migration,
4401 * notice the clear pte and try to allocate a page
4402 * here. Before returning error, get ptl and make
4403 * sure there really is no pte entry.
4404 */
4405 ptl = huge_pte_lock(h, mm, ptep);
4406 if (!huge_pte_none(huge_ptep_get(ptep))) {
4407 ret = 0;
4408 spin_unlock(ptl);
4409 goto out;
4410 }
4411 spin_unlock(ptl);
Souptick Joarder2b740302018-08-23 17:01:36 -07004412 ret = vmf_error(PTR_ERR(page));
Christoph Lameter6bda6662006-01-06 00:10:49 -08004413 goto out;
4414 }
Andrea Arcangeli47ad8472011-01-13 15:46:47 -08004415 clear_huge_page(page, address, pages_per_huge_page(h));
Nick Piggin0ed361d2008-02-04 22:29:34 -08004416 __SetPageUptodate(page);
Mike Kravetzcb6acd02019-02-28 16:22:02 -08004417 new_page = true;
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01004418
Mel Gormanf83a2752009-05-28 14:34:40 -07004419 if (vma->vm_flags & VM_MAYSHARE) {
Mike Kravetzab76ad52015-09-08 15:01:50 -07004420 int err = huge_add_to_page_cache(page, mapping, idx);
Christoph Lameter6bda6662006-01-06 00:10:49 -08004421 if (err) {
4422 put_page(page);
Christoph Lameter6bda6662006-01-06 00:10:49 -08004423 if (err == -EEXIST)
4424 goto retry;
4425 goto out;
4426 }
Mel Gorman23be7462010-04-23 13:17:56 -04004427 } else {
Christoph Lameter6bda6662006-01-06 00:10:49 -08004428 lock_page(page);
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004429 if (unlikely(anon_vma_prepare(vma))) {
4430 ret = VM_FAULT_OOM;
4431 goto backout_unlocked;
4432 }
Hillf Danton409eb8c2012-01-20 14:34:13 -08004433 anon_rmap = 1;
Mel Gorman23be7462010-04-23 13:17:56 -04004434 }
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004435 } else {
Naoya Horiguchi998b4382010-09-08 10:19:32 +09004436 /*
4437 * If memory error occurs between mmap() and fault, some process
4438 * don't have hwpoisoned swap entry for errored virtual address.
4439 * So we need to block hugepage fault by PG_hwpoison bit check.
4440 */
4441 if (unlikely(PageHWPoison(page))) {
Miaohe Lind3e43af2021-01-12 15:49:24 -08004442 ret = VM_FAULT_HWPOISON_LARGE |
Aneesh Kumar K.V972dc4d2012-07-31 16:42:00 -07004443 VM_FAULT_SET_HINDEX(hstate_index(h));
Naoya Horiguchi998b4382010-09-08 10:19:32 +09004444 goto backout_unlocked;
4445 }
Christoph Lameter6bda6662006-01-06 00:10:49 -08004446 }
David Gibson1e8f8892006-01-06 00:10:44 -08004447
Andy Whitcroft57303d82008-08-12 15:08:47 -07004448 /*
4449 * If we are going to COW a private mapping later, we examine the
4450 * pending reservations for this page now. This will ensure that
4451 * any allocations necessary to record that reservation occur outside
4452 * the spinlock.
4453 */
Mike Kravetz5e911372015-09-08 15:01:28 -07004454 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
Huang Ying285b8dc2018-06-07 17:08:08 -07004455 if (vma_needs_reservation(h, vma, haddr) < 0) {
Andy Whitcroft2b267362008-08-12 15:08:49 -07004456 ret = VM_FAULT_OOM;
4457 goto backout_unlocked;
4458 }
Mike Kravetz5e911372015-09-08 15:01:28 -07004459 /* Just decrements count, does not deallocate */
Huang Ying285b8dc2018-06-07 17:08:08 -07004460 vma_end_reservation(h, vma, haddr);
Mike Kravetz5e911372015-09-08 15:01:28 -07004461 }
Andy Whitcroft57303d82008-08-12 15:08:47 -07004462
Aneesh Kumar K.V8bea8052016-12-12 16:41:59 -08004463 ptl = huge_pte_lock(h, mm, ptep);
Nick Piggin83c54072007-07-19 01:47:05 -07004464 ret = 0;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07004465 if (!huge_pte_none(huge_ptep_get(ptep)))
Adam Litke4c887262005-10-29 18:16:46 -07004466 goto backout;
4467
Joonsoo Kim07443a82013-09-11 14:21:58 -07004468 if (anon_rmap) {
4469 ClearPagePrivate(page);
Huang Ying285b8dc2018-06-07 17:08:08 -07004470 hugepage_add_new_anon_rmap(page, vma, haddr);
Choi Gi-yongac714902014-04-07 15:37:36 -07004471 } else
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08004472 page_dup_rmap(page, true);
David Gibson1e8f8892006-01-06 00:10:44 -08004473 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4474 && (vma->vm_flags & VM_SHARED)));
Huang Ying285b8dc2018-06-07 17:08:08 -07004475 set_huge_pte_at(mm, haddr, ptep, new_pte);
David Gibson1e8f8892006-01-06 00:10:44 -08004476
Naoya Horiguchi5d317b22015-11-05 18:47:14 -08004477 hugetlb_count_add(pages_per_huge_page(h), mm);
Hugh Dickins788c7df2009-06-23 13:49:05 +01004478 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
David Gibson1e8f8892006-01-06 00:10:44 -08004479 /* Optimization, do the COW without a second fault */
Huang Ying974e6d62018-08-17 15:45:57 -07004480 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
David Gibson1e8f8892006-01-06 00:10:44 -08004481 }
4482
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004483 spin_unlock(ptl);
Mike Kravetzcb6acd02019-02-28 16:22:02 -08004484
4485 /*
4486 * Only make newly allocated pages active. Existing pages found
4487 * in the pagecache could be !page_huge_active() if they have been
4488 * isolated for migration.
4489 */
4490 if (new_page)
4491 set_page_huge_active(page);
4492
Adam Litke4c887262005-10-29 18:16:46 -07004493 unlock_page(page);
4494out:
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01004495 return ret;
Adam Litke4c887262005-10-29 18:16:46 -07004496
4497backout:
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004498 spin_unlock(ptl);
Andy Whitcroft2b267362008-08-12 15:08:49 -07004499backout_unlocked:
Adam Litke4c887262005-10-29 18:16:46 -07004500 unlock_page(page);
Huang Ying285b8dc2018-06-07 17:08:08 -07004501 restore_reserve_on_error(h, vma, haddr, page);
Adam Litke4c887262005-10-29 18:16:46 -07004502 put_page(page);
4503 goto out;
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01004504}
4505
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004506#ifdef CONFIG_SMP
Wei Yang188b04a2019-11-30 17:57:02 -08004507u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004508{
4509 unsigned long key[2];
4510 u32 hash;
4511
Mike Kravetz1b426ba2019-05-13 17:19:41 -07004512 key[0] = (unsigned long) mapping;
4513 key[1] = idx;
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004514
Mike Kravetz55254632019-11-30 17:56:30 -08004515 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004516
4517 return hash & (num_fault_mutexes - 1);
4518}
4519#else
4520/*
 4521 * For uniprocessor systems we always use a single mutex, so just
4522 * return 0 and avoid the hashing overhead.
4523 */
Wei Yang188b04a2019-11-30 17:57:02 -08004524u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004525{
4526 return 0;
4527}
4528#endif
4529
Souptick Joarder2b740302018-08-23 17:01:36 -07004530vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
Hugh Dickins788c7df2009-06-23 13:49:05 +01004531 unsigned long address, unsigned int flags)
Adam Litke86e52162006-01-06 00:10:43 -08004532{
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004533 pte_t *ptep, entry;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004534 spinlock_t *ptl;
Souptick Joarder2b740302018-08-23 17:01:36 -07004535 vm_fault_t ret;
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004536 u32 hash;
4537 pgoff_t idx;
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004538 struct page *page = NULL;
Andy Whitcroft57303d82008-08-12 15:08:47 -07004539 struct page *pagecache_page = NULL;
Andi Kleena5516432008-07-23 21:27:41 -07004540 struct hstate *h = hstate_vma(vma);
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004541 struct address_space *mapping;
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004542 int need_wait_lock = 0;
Huang Ying285b8dc2018-06-07 17:08:08 -07004543 unsigned long haddr = address & huge_page_mask(h);
Adam Litke86e52162006-01-06 00:10:43 -08004544
Huang Ying285b8dc2018-06-07 17:08:08 -07004545 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
Naoya Horiguchifd6a03e2010-05-28 09:29:21 +09004546 if (ptep) {
Mike Kravetzc0d03812020-04-01 21:11:05 -07004547 /*
4548 * Since we hold no locks, ptep could be stale. That is
4549 * OK as we are only making decisions based on content and
4550 * not actually modifying content here.
4551 */
Naoya Horiguchifd6a03e2010-05-28 09:29:21 +09004552 entry = huge_ptep_get(ptep);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09004553 if (unlikely(is_hugetlb_entry_migration(entry))) {
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004554 migration_entry_wait_huge(vma, mm, ptep);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09004555 return 0;
4556 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
Chris Forbes32f84522011-07-25 17:12:14 -07004557 return VM_FAULT_HWPOISON_LARGE |
Aneesh Kumar K.V972dc4d2012-07-31 16:42:00 -07004558 VM_FAULT_SET_HINDEX(hstate_index(h));
Naoya Horiguchifd6a03e2010-05-28 09:29:21 +09004559 }
4560
Mike Kravetzc0d03812020-04-01 21:11:05 -07004561 /*
4562 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
Mike Kravetz87bf91d2020-04-01 21:11:08 -07004563 * until finished with ptep. This serves two purposes:
4564 * 1) It prevents huge_pmd_unshare from being called elsewhere
4565 * and making the ptep no longer valid.
4566 * 2) It synchronizes us with i_size modifications during truncation.
Mike Kravetzc0d03812020-04-01 21:11:05 -07004567 *
 4568 * ptep could have already been assigned via huge_pte_offset. That
4569 * is OK, as huge_pte_alloc will return the same value unless
4570 * something has changed.
4571 */
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004572 mapping = vma->vm_file->f_mapping;
Mike Kravetzc0d03812020-04-01 21:11:05 -07004573 i_mmap_lock_read(mapping);
4574 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4575 if (!ptep) {
4576 i_mmap_unlock_read(mapping);
4577 return VM_FAULT_OOM;
4578 }
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004579
David Gibson3935baa2006-03-22 00:08:53 -08004580 /*
4581 * Serialize hugepage allocation and instantiation, so that we don't
4582 * get spurious allocation failures if two CPUs race to instantiate
4583 * the same page in the page cache.
4584 */
Mike Kravetzc0d03812020-04-01 21:11:05 -07004585 idx = vma_hugecache_offset(h, vma, haddr);
Wei Yang188b04a2019-11-30 17:57:02 -08004586 hash = hugetlb_fault_mutex_hash(mapping, idx);
Mike Kravetzc672c7f2015-09-08 15:01:35 -07004587 mutex_lock(&hugetlb_fault_mutex_table[hash]);
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004588
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07004589 entry = huge_ptep_get(ptep);
4590 if (huge_pte_none(entry)) {
Davidlohr Bueso8382d912014-04-03 14:47:31 -07004591 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
David Gibsonb4d1d992008-10-15 22:01:11 -07004592 goto out_mutex;
David Gibson3935baa2006-03-22 00:08:53 -08004593 }
Adam Litke86e52162006-01-06 00:10:43 -08004594
Nick Piggin83c54072007-07-19 01:47:05 -07004595 ret = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08004596
Andy Whitcroft57303d82008-08-12 15:08:47 -07004597 /*
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004598 * entry could be a migration/hwpoison entry at this point, so this
 4599 * check prevents the kernel from going below on the assumption that we
Ethon Paul7c8de352020-06-04 16:49:07 -07004600 * have an active hugepage in the pagecache. This goto defers to the 2nd
 4601 * page fault, where the is_hugetlb_entry_(migration|hwpoisoned) check
 4602 * will properly handle it.
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004603 */
4604 if (!pte_present(entry))
4605 goto out_mutex;
4606
4607 /*
Andy Whitcroft57303d82008-08-12 15:08:47 -07004608 * If we are going to COW the mapping later, we examine the pending
4609 * reservations for this page now. This will ensure that any
4610 * allocations necessary to record that reservation occur outside the
4611 * spinlock. For private mappings, we also lookup the pagecache
4612 * page now as it is used to determine if a reservation has been
4613 * consumed.
4614 */
Gerald Schaefer106c9922013-04-29 15:07:23 -07004615 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
Huang Ying285b8dc2018-06-07 17:08:08 -07004616 if (vma_needs_reservation(h, vma, haddr) < 0) {
Andy Whitcroft2b267362008-08-12 15:08:49 -07004617 ret = VM_FAULT_OOM;
David Gibsonb4d1d992008-10-15 22:01:11 -07004618 goto out_mutex;
Andy Whitcroft2b267362008-08-12 15:08:49 -07004619 }
Mike Kravetz5e911372015-09-08 15:01:28 -07004620 /* Just decrements count, does not deallocate */
Huang Ying285b8dc2018-06-07 17:08:08 -07004621 vma_end_reservation(h, vma, haddr);
Andy Whitcroft57303d82008-08-12 15:08:47 -07004622
Mel Gormanf83a2752009-05-28 14:34:40 -07004623 if (!(vma->vm_flags & VM_MAYSHARE))
Andy Whitcroft57303d82008-08-12 15:08:47 -07004624 pagecache_page = hugetlbfs_pagecache_page(h,
Huang Ying285b8dc2018-06-07 17:08:08 -07004625 vma, haddr);
Andy Whitcroft57303d82008-08-12 15:08:47 -07004626 }
4627
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004628 ptl = huge_pte_lock(h, mm, ptep);
Naoya Horiguchi0fe6e202010-05-28 09:29:16 +09004629
David Gibson1e8f8892006-01-06 00:10:44 -08004630 /* Check for a racing update before calling hugetlb_cow */
David Gibsonb4d1d992008-10-15 22:01:11 -07004631 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004632 goto out_ptl;
David Gibsonb4d1d992008-10-15 22:01:11 -07004633
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004634 /*
4635 * hugetlb_cow() requires page locks of pte_page(entry) and
4636	 * pagecache_page, so here we need to take the former one
4637 * when page != pagecache_page or !pagecache_page.
4638 */
4639 page = pte_page(entry);
4640 if (page != pagecache_page)
4641 if (!trylock_page(page)) {
4642 need_wait_lock = 1;
4643 goto out_ptl;
4644 }
4645
4646 get_page(page);
David Gibsonb4d1d992008-10-15 22:01:11 -07004647
Hugh Dickins788c7df2009-06-23 13:49:05 +01004648 if (flags & FAULT_FLAG_WRITE) {
Gerald Schaefer106c9922013-04-29 15:07:23 -07004649 if (!huge_pte_write(entry)) {
Huang Ying974e6d62018-08-17 15:45:57 -07004650 ret = hugetlb_cow(mm, vma, address, ptep,
Aneesh Kumar K.V3999f522016-12-12 16:41:56 -08004651 pagecache_page, ptl);
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004652 goto out_put_page;
David Gibsonb4d1d992008-10-15 22:01:11 -07004653 }
Gerald Schaefer106c9922013-04-29 15:07:23 -07004654 entry = huge_pte_mkdirty(entry);
David Gibsonb4d1d992008-10-15 22:01:11 -07004655 }
4656 entry = pte_mkyoung(entry);
Huang Ying285b8dc2018-06-07 17:08:08 -07004657 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
Hugh Dickins788c7df2009-06-23 13:49:05 +01004658 flags & FAULT_FLAG_WRITE))
Huang Ying285b8dc2018-06-07 17:08:08 -07004659 update_mmu_cache(vma, haddr, ptep);
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004660out_put_page:
4661 if (page != pagecache_page)
4662 unlock_page(page);
4663 put_page(page);
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004664out_ptl:
4665 spin_unlock(ptl);
Andy Whitcroft57303d82008-08-12 15:08:47 -07004666
4667 if (pagecache_page) {
4668 unlock_page(pagecache_page);
4669 put_page(pagecache_page);
4670 }
David Gibsonb4d1d992008-10-15 22:01:11 -07004671out_mutex:
Mike Kravetzc672c7f2015-09-08 15:01:35 -07004672 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
Mike Kravetzc0d03812020-04-01 21:11:05 -07004673 i_mmap_unlock_read(mapping);
Naoya Horiguchi0f792cf2015-02-11 15:25:25 -08004674 /*
4675	 * Generally it's safe to hold a refcount while waiting for the page
4676	 * lock. But here we only wait to defer the next page fault and avoid a
4677	 * busy loop, and the page is not used after it is unlocked and before
4678	 * we return from the current page fault. So we are safe from accessing
4679	 * a freed page even if we wait here without taking a refcount.
4680 */
4681 if (need_wait_lock)
4682 wait_on_page_locked(page);
David Gibson1e8f8892006-01-06 00:10:44 -08004683 return ret;
Adam Litke86e52162006-01-06 00:10:43 -08004684}
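
/*
 * Illustrative sketch (not taken verbatim from a caller): any path that can
 * instantiate a hugetlb page for a given (mapping, index) is expected to
 * take the same hashed mutex used above, so racing faults and hole punches
 * are serialized:
 *
 *	idx  = vma_hugecache_offset(h, vma, haddr);
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	// ... look up or allocate the page, update page cache / ptes ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */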
4685
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004686/*
4687 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4688 * modifications for huge pages.
4689 */
4690int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4691 pte_t *dst_pte,
4692 struct vm_area_struct *dst_vma,
4693 unsigned long dst_addr,
4694 unsigned long src_addr,
4695 struct page **pagep)
4696{
Andrea Arcangeli1e3921472017-11-02 15:59:29 -07004697 struct address_space *mapping;
4698 pgoff_t idx;
4699 unsigned long size;
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004700 int vm_shared = dst_vma->vm_flags & VM_SHARED;
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004701 struct hstate *h = hstate_vma(dst_vma);
4702 pte_t _dst_pte;
4703 spinlock_t *ptl;
4704 int ret;
4705 struct page *page;
4706
4707 if (!*pagep) {
Mina Almasry2eb4ec92021-06-04 20:01:36 -07004708 /* If a page already exists, then it's UFFDIO_COPY for
4709 * a non-missing case. Return -EEXIST.
4710 */
4711 if (vm_shared &&
4712 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4713 ret = -EEXIST;
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004714 goto out;
Mina Almasry2eb4ec92021-06-04 20:01:36 -07004715 }
4716
4717 page = alloc_huge_page(dst_vma, dst_addr, 0);
4718 if (IS_ERR(page)) {
4719 ret = -ENOMEM;
4720 goto out;
4721 }
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004722
4723 ret = copy_huge_page_from_user(page,
4724 (const void __user *) src_addr,
Mike Kravetz810a56b2017-02-22 15:42:58 -08004725 pages_per_huge_page(h), false);
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004726
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07004727		/* fall back to copy_from_user outside mmap_lock */
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004728 if (unlikely(ret)) {
Andrea Arcangeli9e368252018-11-30 14:09:25 -08004729 ret = -ENOENT;
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004730 *pagep = page;
4731 /* don't free the page */
4732 goto out;
4733 }
4734 } else {
4735 page = *pagep;
4736 *pagep = NULL;
4737 }
4738
4739 /*
4740 * The memory barrier inside __SetPageUptodate makes sure that
4741 * preceding stores to the page contents become visible before
4742 * the set_pte_at() write.
4743 */
4744 __SetPageUptodate(page);
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004745
Andrea Arcangeli1e3921472017-11-02 15:59:29 -07004746 mapping = dst_vma->vm_file->f_mapping;
4747 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4748
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004749 /*
4750 * If shared, add to page cache
4751 */
4752 if (vm_shared) {
Andrea Arcangeli1e3921472017-11-02 15:59:29 -07004753 size = i_size_read(mapping->host) >> huge_page_shift(h);
4754 ret = -EFAULT;
4755 if (idx >= size)
4756 goto out_release_nounlock;
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004757
Andrea Arcangeli1e3921472017-11-02 15:59:29 -07004758 /*
4759 * Serialization between remove_inode_hugepages() and
4760 * huge_add_to_page_cache() below happens through the
4761		 * hugetlb_fault_mutex_table, which must be held here by
4762		 * the caller.
4763 */
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004764 ret = huge_add_to_page_cache(page, mapping, idx);
4765 if (ret)
4766 goto out_release_nounlock;
4767 }
4768
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004769 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4770 spin_lock(ptl);
4771
Andrea Arcangeli1e3921472017-11-02 15:59:29 -07004772 /*
4773	 * Recheck the i_size while holding the PT lock, to make sure not
4774 * to leave any page mapped (as page_mapped()) beyond the end
4775 * of the i_size (remove_inode_hugepages() is strict about
4776 * enforcing that). If we bail out here, we'll also leave a
4777 * page in the radix tree in the vm_shared case beyond the end
4778 * of the i_size, but remove_inode_hugepages() will take care
4779 * of it as soon as we drop the hugetlb_fault_mutex_table.
4780 */
4781 size = i_size_read(mapping->host) >> huge_page_shift(h);
4782 ret = -EFAULT;
4783 if (idx >= size)
4784 goto out_release_unlock;
4785
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004786 ret = -EEXIST;
4787 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4788 goto out_release_unlock;
4789
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004790 if (vm_shared) {
4791 page_dup_rmap(page, true);
4792 } else {
4793 ClearPagePrivate(page);
4794 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4795 }
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004796
4797 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4798 if (dst_vma->vm_flags & VM_WRITE)
4799 _dst_pte = huge_pte_mkdirty(_dst_pte);
4800 _dst_pte = pte_mkyoung(_dst_pte);
4801
4802 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4803
4804 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4805 dst_vma->vm_flags & VM_WRITE);
4806 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4807
4808 /* No need to invalidate - it was non-present before */
4809 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4810
4811 spin_unlock(ptl);
Mike Kravetzcb6acd02019-02-28 16:22:02 -08004812 set_page_huge_active(page);
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004813 if (vm_shared)
4814 unlock_page(page);
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004815 ret = 0;
4816out:
4817 return ret;
4818out_release_unlock:
4819 spin_unlock(ptl);
Mike Kravetz1c9e8de2017-02-22 15:43:43 -08004820 if (vm_shared)
4821 unlock_page(page);
Andrea Arcangeli5af10df2017-08-10 15:23:38 -07004822out_release_nounlock:
Mike Kravetz8fb5deb2017-02-22 15:42:52 -08004823 put_page(page);
4824 goto out;
4825}
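
/*
 * Rough sketch of the expected caller handling of the -ENOENT and *pagep
 * contract above (the exact call site in mm/userfaultfd.c is assumed, not
 * quoted):
 *
 *	err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
 *				       dst_addr, src_addr, &page);
 *	if (err == -ENOENT) {
 *		mmap_read_unlock(dst_mm);
 *		// copy again with page faults allowed
 *		err = copy_huge_page_from_user(page,
 *				(const void __user *)src_addr,
 *				pages_per_huge_page(h), true);
 *		mmap_read_lock(dst_mm);
 *		// ... revalidate and retry with *pagep prefilled ...
 *	}
 */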
4826
Michel Lespinasse28a35712013-02-22 16:35:55 -08004827long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4828 struct page **pages, struct vm_area_struct **vmas,
4829 unsigned long *position, unsigned long *nr_pages,
Peter Xu4f6da932020-04-01 21:07:58 -07004830 long i, unsigned int flags, int *locked)
David Gibson63551ae2005-06-21 17:14:44 -07004831{
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08004832 unsigned long pfn_offset;
4833 unsigned long vaddr = *position;
Michel Lespinasse28a35712013-02-22 16:35:55 -08004834 unsigned long remainder = *nr_pages;
Andi Kleena5516432008-07-23 21:27:41 -07004835 struct hstate *h = hstate_vma(vma);
Daniel Jordan2be7cfe2017-08-02 13:31:47 -07004836 int err = -EFAULT;
David Gibson63551ae2005-06-21 17:14:44 -07004837
David Gibson63551ae2005-06-21 17:14:44 -07004838 while (vaddr < vma->vm_end && remainder) {
Adam Litke4c887262005-10-29 18:16:46 -07004839 pte_t *pte;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004840 spinlock_t *ptl = NULL;
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004841 int absent;
Adam Litke4c887262005-10-29 18:16:46 -07004842 struct page *page;
4843
4844 /*
David Rientjes02057962015-04-14 15:48:24 -07004845 * If we have a pending SIGKILL, don't keep faulting pages and
4846 * potentially allocating memory.
4847 */
Davidlohr Buesofa45f112019-01-03 15:28:55 -08004848 if (fatal_signal_pending(current)) {
David Rientjes02057962015-04-14 15:48:24 -07004849 remainder = 0;
4850 break;
4851 }
4852
4853 /*
Adam Litke4c887262005-10-29 18:16:46 -07004854		 * Some archs (sparc64, sh*) have multiple pte_t entries
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004855		 * for each hugepage. We have to make sure we get the
Adam Litke4c887262005-10-29 18:16:46 -07004856		 * first one, for the page indexing below to work.
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004857 *
4858 * Note that page table lock is not held when pte is null.
Adam Litke4c887262005-10-29 18:16:46 -07004859 */
Punit Agrawal7868a202017-07-06 15:39:42 -07004860 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4861 huge_page_size(h));
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004862 if (pte)
4863 ptl = huge_pte_lock(h, mm, pte);
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004864 absent = !pte || huge_pte_none(huge_ptep_get(pte));
Adam Litke4c887262005-10-29 18:16:46 -07004865
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004866 /*
4867 * When coredumping, it suits get_dump_page if we just return
Hugh Dickins3ae77f42009-09-21 17:03:33 -07004868 * an error where there's an empty slot with no huge pagecache
4869 * to back it. This way, we avoid allocating a hugepage, and
4870 * the sparse dumpfile avoids allocating disk blocks, but its
4871 * huge holes still show up with zeroes where they need to be.
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004872 */
Hugh Dickins3ae77f42009-09-21 17:03:33 -07004873 if (absent && (flags & FOLL_DUMP) &&
4874 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004875 if (pte)
4876 spin_unlock(ptl);
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004877 remainder = 0;
4878 break;
4879 }
4880
Naoya Horiguchi9cc3a5b2013-04-17 15:58:30 -07004881 /*
4882		 * We need to call hugetlb_fault for both hugepages under migration
4883		 * (in which case hugetlb_fault waits for the migration) and
4884		 * hwpoisoned hugepages (in which case we need to prevent the
4885		 * caller from accessing them). To do this, we use is_swap_pte
4886		 * here instead of is_hugetlb_entry_migration and
4887		 * is_hugetlb_entry_hwpoisoned, because it simply covers
4888		 * both cases, and because we can't follow correct pages
4889		 * directly from any kind of swap entry.
4890 */
4891 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
Gerald Schaefer106c9922013-04-29 15:07:23 -07004892 ((flags & FOLL_WRITE) &&
4893 !huge_pte_write(huge_ptep_get(pte)))) {
Souptick Joarder2b740302018-08-23 17:01:36 -07004894 vm_fault_t ret;
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004895 unsigned int fault_flags = 0;
Adam Litke4c887262005-10-29 18:16:46 -07004896
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004897 if (pte)
4898 spin_unlock(ptl);
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004899 if (flags & FOLL_WRITE)
4900 fault_flags |= FAULT_FLAG_WRITE;
Peter Xu4f6da932020-04-01 21:07:58 -07004901 if (locked)
Peter Xu71335f32020-04-01 21:08:53 -07004902 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4903 FAULT_FLAG_KILLABLE;
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004904 if (flags & FOLL_NOWAIT)
4905 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4906 FAULT_FLAG_RETRY_NOWAIT;
4907 if (flags & FOLL_TRIED) {
Peter Xu4426e942020-04-01 21:08:49 -07004908 /*
4909 * Note: FAULT_FLAG_ALLOW_RETRY and
4910 * FAULT_FLAG_TRIED can co-exist
4911 */
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004912 fault_flags |= FAULT_FLAG_TRIED;
4913 }
4914 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4915 if (ret & VM_FAULT_ERROR) {
Daniel Jordan2be7cfe2017-08-02 13:31:47 -07004916 err = vm_fault_to_errno(ret, flags);
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004917 remainder = 0;
4918 break;
4919 }
4920 if (ret & VM_FAULT_RETRY) {
Peter Xu4f6da932020-04-01 21:07:58 -07004921 if (locked &&
Andrea Arcangeli1ac25012019-02-01 14:20:16 -08004922 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
Peter Xu4f6da932020-04-01 21:07:58 -07004923 *locked = 0;
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004924 *nr_pages = 0;
4925 /*
4926				 * VM_FAULT_RETRY must not return an
4927				 * error; it will return zero
4928				 * instead.
4929 *
4930 * No need to update "position" as the
4931 * caller will not check it after
4932 * *nr_pages is set to 0.
4933 */
4934 return i;
4935 }
4936 continue;
Adam Litke4c887262005-10-29 18:16:46 -07004937 }
David Gibson63551ae2005-06-21 17:14:44 -07004938
Andi Kleena5516432008-07-23 21:27:41 -07004939 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07004940 page = pte_page(huge_ptep_get(pte));
Linus Torvalds8fde12c2019-04-11 10:49:19 -07004941
4942 /*
Zhigang Luacbfb082019-11-30 17:57:06 -08004943		 * If subpage information is not requested, update counters
4944 * and skip the same_page loop below.
4945 */
4946 if (!pages && !vmas && !pfn_offset &&
4947 (vaddr + huge_page_size(h) < vma->vm_end) &&
4948 (remainder >= pages_per_huge_page(h))) {
4949 vaddr += huge_page_size(h);
4950 remainder -= pages_per_huge_page(h);
4951 i += pages_per_huge_page(h);
4952 spin_unlock(ptl);
4953 continue;
4954 }
4955
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08004956same_page:
Chen, Kenneth Wd6692182006-03-31 02:29:57 -08004957 if (pages) {
Hugh Dickins2a15efc2009-09-21 17:03:27 -07004958 pages[i] = mem_map_offset(page, pfn_offset);
John Hubbard3faa52c2020-04-01 21:05:29 -07004959 /*
4960 * try_grab_page() should always succeed here, because:
4961 * a) we hold the ptl lock, and b) we've just checked
4962 * that the huge page is present in the page tables. If
4963 * the huge page is present, then the tail pages must
4964 * also be present. The ptl prevents the head page and
4965 * tail pages from being rearranged in any way. So this
4966 * page must be available at this point, unless the page
4967 * refcount overflowed:
4968 */
4969 if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
4970 spin_unlock(ptl);
4971 remainder = 0;
4972 err = -ENOMEM;
4973 break;
4974 }
Chen, Kenneth Wd6692182006-03-31 02:29:57 -08004975 }
David Gibson63551ae2005-06-21 17:14:44 -07004976
4977 if (vmas)
4978 vmas[i] = vma;
4979
4980 vaddr += PAGE_SIZE;
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08004981 ++pfn_offset;
David Gibson63551ae2005-06-21 17:14:44 -07004982 --remainder;
4983 ++i;
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08004984 if (vaddr < vma->vm_end && remainder &&
Andi Kleena5516432008-07-23 21:27:41 -07004985 pfn_offset < pages_per_huge_page(h)) {
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08004986 /*
4987 * We use pfn_offset to avoid touching the pageframes
4988 * of this compound page.
4989 */
4990 goto same_page;
4991 }
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08004992 spin_unlock(ptl);
David Gibson63551ae2005-06-21 17:14:44 -07004993 }
Michel Lespinasse28a35712013-02-22 16:35:55 -08004994 *nr_pages = remainder;
Andrea Arcangeli87ffc112017-02-22 15:43:13 -08004995 /*
4996	 * Setting position is actually required only if remainder is
4997	 * not zero, but it's faster not to add an "if (remainder)"
4998 * branch.
4999 */
David Gibson63551ae2005-06-21 17:14:44 -07005000 *position = vaddr;
5001
Daniel Jordan2be7cfe2017-08-02 13:31:47 -07005002 return i ? i : err;
David Gibson63551ae2005-06-21 17:14:44 -07005003}
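
/*
 * Assumed dispatch from the generic GUP path (mm/gup.c), shown only as a
 * hedged sketch of how this function is reached:
 *
 *	if (is_vm_hugetlb_page(vma)) {
 *		i = follow_hugetlb_page(mm, vma, pages, vmas,
 *					&start, &nr_pages, i,
 *					gup_flags, locked);
 *		continue;	// i is the running page count or an -errno
 *	}
 */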
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005004
Aneesh Kumar K.V5491ae72016-07-13 15:06:43 +05305005#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
5006/*
5007 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
5008 * implement this.
5009 */
5010#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
5011#endif
5012
Peter Zijlstra7da4d642012-11-19 03:14:23 +01005013unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005014 unsigned long address, unsigned long end, pgprot_t newprot)
5015{
5016 struct mm_struct *mm = vma->vm_mm;
5017 unsigned long start = address;
5018 pte_t *ptep;
5019 pte_t pte;
Andi Kleena5516432008-07-23 21:27:41 -07005020 struct hstate *h = hstate_vma(vma);
Peter Zijlstra7da4d642012-11-19 03:14:23 +01005021 unsigned long pages = 0;
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005022 bool shared_pmd = false;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005023 struct mmu_notifier_range range;
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005024
5025 /*
5026 * In the case of shared PMDs, the area to flush could be beyond
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005027 * start/end. Set range.start/range.end to cover the maximum possible
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005028 * range if PMD sharing is possible.
5029 */
Jérôme Glisse7269f992019-05-13 17:20:53 -07005030 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
5031 0, vma, mm, start, end);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005032 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005033
5034 BUG_ON(address >= end);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005035 flush_cache_range(vma, range.start, range.end);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005036
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005037 mmu_notifier_invalidate_range_start(&range);
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08005038 i_mmap_lock_write(vma->vm_file->f_mapping);
Andi Kleena5516432008-07-23 21:27:41 -07005039 for (; address < end; address += huge_page_size(h)) {
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08005040 spinlock_t *ptl;
Punit Agrawal7868a202017-07-06 15:39:42 -07005041 ptep = huge_pte_offset(mm, address, huge_page_size(h));
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005042 if (!ptep)
5043 continue;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08005044 ptl = huge_pte_lock(h, mm, ptep);
Mike Kravetz34ae2042020-08-11 18:31:38 -07005045 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
Peter Zijlstra7da4d642012-11-19 03:14:23 +01005046 pages++;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08005047 spin_unlock(ptl);
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005048 shared_pmd = true;
Chen, Kenneth W39dde652006-12-06 20:32:03 -08005049 continue;
Peter Zijlstra7da4d642012-11-19 03:14:23 +01005050 }
Naoya Horiguchia8bda282015-02-11 15:25:28 -08005051 pte = huge_ptep_get(ptep);
5052 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5053 spin_unlock(ptl);
5054 continue;
5055 }
5056 if (unlikely(is_hugetlb_entry_migration(pte))) {
5057 swp_entry_t entry = pte_to_swp_entry(pte);
5058
5059 if (is_write_migration_entry(entry)) {
5060 pte_t newpte;
5061
5062 make_migration_entry_read(&entry);
5063 newpte = swp_entry_to_pte(entry);
Punit Agrawale5251fd2017-07-06 15:39:50 -07005064 set_huge_swap_pte_at(mm, address, ptep,
5065 newpte, huge_page_size(h));
Naoya Horiguchia8bda282015-02-11 15:25:28 -08005066 pages++;
5067 }
5068 spin_unlock(ptl);
5069 continue;
5070 }
5071 if (!huge_pte_none(pte)) {
Aneesh Kumar K.V023bdd02019-03-05 15:46:37 -08005072 pte_t old_pte;
5073
5074 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5075 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
Tony Lube7517d2013-02-04 14:28:46 -08005076 pte = arch_make_huge_pte(pte, vma, NULL, 0);
Aneesh Kumar K.V023bdd02019-03-05 15:46:37 -08005077 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
Peter Zijlstra7da4d642012-11-19 03:14:23 +01005078 pages++;
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005079 }
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08005080 spin_unlock(ptl);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005081 }
Mel Gormand8333522012-07-31 16:46:20 -07005082 /*
Davidlohr Buesoc8c06ef2014-12-12 16:54:24 -08005083 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
Mel Gormand8333522012-07-31 16:46:20 -07005084 * may have cleared our pud entry and done put_page on the page table:
Davidlohr Buesoc8c06ef2014-12-12 16:54:24 -08005085 * once we release i_mmap_rwsem, another task can do the final put_page
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005086	 * and that page table can be reused and filled with junk. If we actually
5087 * did unshare a page of pmds, flush the range corresponding to the pud.
Mel Gormand8333522012-07-31 16:46:20 -07005088 */
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005089 if (shared_pmd)
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005090 flush_hugetlb_tlb_range(vma, range.start, range.end);
Mike Kravetzdff11ab2018-10-05 15:51:33 -07005091 else
5092 flush_hugetlb_tlb_range(vma, start, end);
Jérôme Glisse0f108512017-11-15 17:34:07 -08005093 /*
5094	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
5095	 * page table protection, not changing it to point to a new page.
5096 *
Mike Rapoportad56b732018-03-21 21:22:47 +02005097 * See Documentation/vm/mmu_notifier.rst
Jérôme Glisse0f108512017-11-15 17:34:07 -08005098 */
Davidlohr Bueso83cde9e2014-12-12 16:54:21 -08005099 i_mmap_unlock_write(vma->vm_file->f_mapping);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08005100 mmu_notifier_invalidate_range_end(&range);
Peter Zijlstra7da4d642012-11-19 03:14:23 +01005101
5102 return pages << h->order;
Zhang, Yanmin8f860592006-03-22 00:08:50 -08005103}
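
/*
 * The return value above is in base pages (pages << h->order), so it can be
 * summed with what the normal-page protection walk returns. Assumed dispatch
 * from mm/mprotect.c, as a sketch:
 *
 *	if (is_vm_hugetlb_page(vma))
 *		pages = hugetlb_change_protection(vma, start, end, newprot);
 *	else
 *		pages = change_protection_range(vma, start, end, newprot, ...);
 */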
5104
Mel Gormana1e78772008-07-23 21:27:23 -07005105int hugetlb_reserve_pages(struct inode *inode,
5106 long from, long to,
Mel Gorman5a6fe122009-02-10 14:02:27 +00005107 struct vm_area_struct *vma,
KOSAKI Motohiroca16d142011-05-26 19:16:19 +09005108 vm_flags_t vm_flags)
Adam Litkee4e574b2007-10-16 01:26:19 -07005109{
Mina Almasry0db9d742020-04-01 21:11:25 -07005110 long ret, chg, add = -1;
Andi Kleena5516432008-07-23 21:27:41 -07005111 struct hstate *h = hstate_inode(inode);
David Gibson90481622012-03-21 16:34:12 -07005112 struct hugepage_subpool *spool = subpool_inode(inode);
Joonsoo Kim9119a412014-04-03 14:47:25 -07005113 struct resv_map *resv_map;
Mina Almasry075a61d2020-04-01 21:11:28 -07005114 struct hugetlb_cgroup *h_cg = NULL;
Mina Almasry0db9d742020-04-01 21:11:25 -07005115 long gbl_reserve, regions_needed = 0;
Adam Litkee4e574b2007-10-16 01:26:19 -07005116
Mike Kravetz63489f82018-03-22 16:17:13 -07005117 /* This should never happen */
5118 if (from > to) {
5119 VM_WARN(1, "%s called with a negative range\n", __func__);
5120 return -EINVAL;
5121 }
5122
Mel Gormana1e78772008-07-23 21:27:23 -07005123 /*
Mel Gorman17c9d122009-02-11 16:34:16 +00005124 * Only apply hugepage reservation if asked. At fault time, an
5125	 * attempt will be made for a VM_NORESERVE mapping to allocate a page
David Gibson90481622012-03-21 16:34:12 -07005126	 * without using reserves.
Mel Gorman17c9d122009-02-11 16:34:16 +00005127 */
KOSAKI Motohiroca16d142011-05-26 19:16:19 +09005128 if (vm_flags & VM_NORESERVE)
Mel Gorman17c9d122009-02-11 16:34:16 +00005129 return 0;
5130
5131 /*
Mel Gormana1e78772008-07-23 21:27:23 -07005132 * Shared mappings base their reservation on the number of pages that
5133 * are already allocated on behalf of the file. Private mappings need
5134 * to reserve the full area even if read-only as mprotect() may be
5135 * called to make the mapping read-write. Assume !vma is a shm mapping
5136 */
Joonsoo Kim9119a412014-04-03 14:47:25 -07005137 if (!vma || vma->vm_flags & VM_MAYSHARE) {
Mike Kravetzf27a5132019-05-13 17:22:55 -07005138 /*
5139 * resv_map can not be NULL as hugetlb_reserve_pages is only
5140 * called for inodes for which resv_maps were created (see
5141 * hugetlbfs_get_inode).
5142 */
Joonsoo Kim4e35f482014-04-03 14:47:30 -07005143 resv_map = inode_resv_map(inode);
Joonsoo Kim9119a412014-04-03 14:47:25 -07005144
Mina Almasry0db9d742020-04-01 21:11:25 -07005145 chg = region_chg(resv_map, from, to, &regions_needed);
Joonsoo Kim9119a412014-04-03 14:47:25 -07005146
5147 } else {
Mina Almasrye9fe92a2020-04-01 21:11:21 -07005148 /* Private mapping. */
Joonsoo Kim9119a412014-04-03 14:47:25 -07005149 resv_map = resv_map_alloc();
Mel Gorman5a6fe122009-02-10 14:02:27 +00005150 if (!resv_map)
5151 return -ENOMEM;
5152
Mel Gorman17c9d122009-02-11 16:34:16 +00005153 chg = to - from;
5154
Mel Gorman5a6fe122009-02-10 14:02:27 +00005155 set_vma_resv_map(vma, resv_map);
5156 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5157 }
5158
Dave Hansenc50ac052012-05-29 15:06:46 -07005159 if (chg < 0) {
5160 ret = chg;
5161 goto out_err;
5162 }
Mel Gorman17c9d122009-02-11 16:34:16 +00005163
Mina Almasry075a61d2020-04-01 21:11:28 -07005164 ret = hugetlb_cgroup_charge_cgroup_rsvd(
5165 hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
5166
5167 if (ret < 0) {
5168 ret = -ENOMEM;
5169 goto out_err;
5170 }
5171
5172 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5173 /* For private mappings, the hugetlb_cgroup uncharge info hangs
5174 * of the resv_map.
5175 */
5176 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5177 }
5178
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07005179 /*
5180 * There must be enough pages in the subpool for the mapping. If
5181 * the subpool has a minimum size, there may be some global
5182 * reservations already in place (gbl_reserve).
5183 */
5184 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5185 if (gbl_reserve < 0) {
Dave Hansenc50ac052012-05-29 15:06:46 -07005186 ret = -ENOSPC;
Mina Almasry075a61d2020-04-01 21:11:28 -07005187 goto out_uncharge_cgroup;
Dave Hansenc50ac052012-05-29 15:06:46 -07005188 }
Mel Gorman17c9d122009-02-11 16:34:16 +00005189
5190 /*
5191	 * Check that enough hugepages are available for the reservation.
David Gibson90481622012-03-21 16:34:12 -07005192	 * Hand the pages back to the subpool if there are not.
Mel Gorman17c9d122009-02-11 16:34:16 +00005193 */
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07005194 ret = hugetlb_acct_memory(h, gbl_reserve);
Mel Gorman17c9d122009-02-11 16:34:16 +00005195 if (ret < 0) {
Mina Almasry075a61d2020-04-01 21:11:28 -07005196 goto out_put_pages;
Mel Gorman17c9d122009-02-11 16:34:16 +00005197 }
5198
5199 /*
5200 * Account for the reservations made. Shared mappings record regions
5201 * that have reservations as they are shared by multiple VMAs.
5202 * When the last VMA disappears, the region map says how much
5203 * the reservation was and the page cache tells how much of
5204 * the reservation was consumed. Private mappings are per-VMA and
5205 * only the consumed reservations are tracked. When the VMA
5206 * disappears, the original reservation is the VMA size and the
5207 * consumed reservations are stored in the map. Hence, nothing
5208 * else has to be done for private mappings here
5209 */
Mike Kravetz33039672015-06-24 16:57:58 -07005210 if (!vma || vma->vm_flags & VM_MAYSHARE) {
Mina Almasry075a61d2020-04-01 21:11:28 -07005211 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
Mike Kravetz33039672015-06-24 16:57:58 -07005212
Mina Almasry0db9d742020-04-01 21:11:25 -07005213 if (unlikely(add < 0)) {
5214 hugetlb_acct_memory(h, -gbl_reserve);
Dan Carpenter9b52a372020-12-14 19:12:11 -08005215 ret = add;
Mina Almasry075a61d2020-04-01 21:11:28 -07005216 goto out_put_pages;
Mina Almasry0db9d742020-04-01 21:11:25 -07005217 } else if (unlikely(chg > add)) {
Mike Kravetz33039672015-06-24 16:57:58 -07005218 /*
5219 * pages in this range were added to the reserve
5220 * map between region_chg and region_add. This
5221 * indicates a race with alloc_huge_page. Adjust
5222 * the subpool and reserve counts modified above
5223 * based on the difference.
5224 */
5225 long rsv_adjust;
5226
Miaohe Linfe03ccc2021-03-24 21:37:17 -07005227 /*
5228 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
5229 * reference to h_cg->css. See comment below for detail.
5230 */
Mina Almasry075a61d2020-04-01 21:11:28 -07005231 hugetlb_cgroup_uncharge_cgroup_rsvd(
5232 hstate_index(h),
5233 (chg - add) * pages_per_huge_page(h), h_cg);
5234
Mike Kravetz33039672015-06-24 16:57:58 -07005235 rsv_adjust = hugepage_subpool_put_pages(spool,
5236 chg - add);
5237 hugetlb_acct_memory(h, -rsv_adjust);
Miaohe Linfe03ccc2021-03-24 21:37:17 -07005238 } else if (h_cg) {
5239 /*
5240 * The file_regions will hold their own reference to
5241 * h_cg->css. So we should release the reference held
5242 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
5243 * done.
5244 */
5245 hugetlb_cgroup_put_rsvd_cgroup(h_cg);
Mike Kravetz33039672015-06-24 16:57:58 -07005246 }
5247 }
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07005248 return 0;
Mina Almasry075a61d2020-04-01 21:11:28 -07005249out_put_pages:
5250 /* put back original number of pages, chg */
5251 (void)hugepage_subpool_put_pages(spool, chg);
5252out_uncharge_cgroup:
5253 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5254 chg * pages_per_huge_page(h), h_cg);
Dave Hansenc50ac052012-05-29 15:06:46 -07005255out_err:
Mike Kravetz5e911372015-09-08 15:01:28 -07005256 if (!vma || vma->vm_flags & VM_MAYSHARE)
Mina Almasry0db9d742020-04-01 21:11:25 -07005257 /* Only call region_abort if the region_chg succeeded but the
5258 * region_add failed or didn't run.
5259 */
5260 if (chg >= 0 && add < 0)
5261 region_abort(resv_map, from, to, regions_needed);
Joonsoo Kimf031dd22014-04-03 14:47:28 -07005262 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5263 kref_put(&resv_map->refs, resv_map_release);
Dave Hansenc50ac052012-05-29 15:06:46 -07005264 return ret;
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07005265}
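
/*
 * Hedged usage sketch: hugetlbfs is assumed to call this at mmap() and
 * fallocate() time with from/to expressed in huge pages, roughly:
 *
 *	if (hugetlb_reserve_pages(inode,
 *				  vma->vm_pgoff >> huge_page_order(h),
 *				  len >> huge_page_shift(h),
 *				  vma, vma->vm_flags))
 *		goto out;	// reservation failure
 */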
5266
Mike Kravetzb5cec282015-09-08 15:01:41 -07005267long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5268 long freed)
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07005269{
Andi Kleena5516432008-07-23 21:27:41 -07005270 struct hstate *h = hstate_inode(inode);
Joonsoo Kim4e35f482014-04-03 14:47:30 -07005271 struct resv_map *resv_map = inode_resv_map(inode);
Joonsoo Kim9119a412014-04-03 14:47:25 -07005272 long chg = 0;
David Gibson90481622012-03-21 16:34:12 -07005273 struct hugepage_subpool *spool = subpool_inode(inode);
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07005274 long gbl_reserve;
Ken Chen45c682a2007-11-14 16:59:44 -08005275
Mike Kravetzf27a5132019-05-13 17:22:55 -07005276 /*
5277 * Since this routine can be called in the evict inode path for all
5278 * hugetlbfs inodes, resv_map could be NULL.
5279 */
Mike Kravetzb5cec282015-09-08 15:01:41 -07005280 if (resv_map) {
5281 chg = region_del(resv_map, start, end);
5282 /*
5283 * region_del() can fail in the rare case where a region
5284 * must be split and another region descriptor can not be
5285 * allocated. If end == LONG_MAX, it will not fail.
5286 */
5287 if (chg < 0)
5288 return chg;
5289 }
5290
Ken Chen45c682a2007-11-14 16:59:44 -08005291 spin_lock(&inode->i_lock);
Eric Sandeene4c6f8b2009-07-29 15:02:16 -07005292 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
Ken Chen45c682a2007-11-14 16:59:44 -08005293 spin_unlock(&inode->i_lock);
5294
Mike Kravetz1c5ecae2015-04-15 16:13:39 -07005295 /*
5296 * If the subpool has a minimum size, the number of global
5297 * reservations to be released may be adjusted.
5298 */
5299 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5300 hugetlb_acct_memory(h, -gbl_reserve);
Mike Kravetzb5cec282015-09-08 15:01:41 -07005301
5302 return 0;
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07005303}
Naoya Horiguchi93f70f92010-05-28 09:29:20 +09005304
Steve Capper3212b532013-04-23 12:35:02 +01005305#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5306static unsigned long page_table_shareable(struct vm_area_struct *svma,
5307 struct vm_area_struct *vma,
5308 unsigned long addr, pgoff_t idx)
5309{
5310 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5311 svma->vm_start;
5312 unsigned long sbase = saddr & PUD_MASK;
5313 unsigned long s_end = sbase + PUD_SIZE;
5314
5315 /* Allow segments to share if only one is marked locked */
Eric B Munsonde60f5f2015-11-05 18:51:36 -08005316 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5317 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
Steve Capper3212b532013-04-23 12:35:02 +01005318
5319 /*
5320 * match the virtual addresses, permission and the alignment of the
5321 * page table page.
5322 */
5323 if (pmd_index(addr) != pmd_index(saddr) ||
5324 vm_flags != svm_flags ||
5325 sbase < svma->vm_start || svma->vm_end < s_end)
5326 return 0;
5327
5328 return saddr;
5329}
5330
Nicholas Krause31aafb42015-09-04 15:47:58 -07005331static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
Steve Capper3212b532013-04-23 12:35:02 +01005332{
5333 unsigned long base = addr & PUD_MASK;
5334 unsigned long end = base + PUD_SIZE;
5335
5336 /*
5337 * check on proper vm_flags and page table alignment
5338 */
Mike Kravetz017b1662018-10-05 15:51:29 -07005339 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
Nicholas Krause31aafb42015-09-04 15:47:58 -07005340 return true;
5341 return false;
Steve Capper3212b532013-04-23 12:35:02 +01005342}
5343
5344/*
Mike Kravetz017b1662018-10-05 15:51:29 -07005345 * Determine if the start,end range within vma could be mapped by a shared pmd.
5346 * If yes, adjust start and end to cover the range associated with possible
5347 * shared pmd mappings.
5348 */
5349void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5350 unsigned long *start, unsigned long *end)
5351{
Li Xinhaie3359522021-02-24 12:06:54 -08005352 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
5353 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
Mike Kravetz017b1662018-10-05 15:51:29 -07005354
Li Xinhaie3359522021-02-24 12:06:54 -08005355 /*
5356	 * The vma needs to span at least one aligned PUD size, and the start,end
5357	 * range must at least partially fall within it.
5358 */
5359 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
5360 (*end <= v_start) || (*start >= v_end))
Mike Kravetz017b1662018-10-05 15:51:29 -07005361 return;
5362
Peter Xu75802ca62020-08-06 23:26:11 -07005363 /* Extend the range to be PUD aligned for a worst case scenario */
Li Xinhaie3359522021-02-24 12:06:54 -08005364 if (*start > v_start)
5365 *start = ALIGN_DOWN(*start, PUD_SIZE);
Mike Kravetz017b1662018-10-05 15:51:29 -07005366
Li Xinhaie3359522021-02-24 12:06:54 -08005367 if (*end < v_end)
5368 *end = ALIGN(*end, PUD_SIZE);
Mike Kravetz017b1662018-10-05 15:51:29 -07005369}
5370
5371/*
Steve Capper3212b532013-04-23 12:35:02 +01005372 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
5373 * and returns the corresponding pte. While this is not necessary for the
5374 * !shared pmd case because we can allocate the pmd later as well, it makes the
Mike Kravetzc0d03812020-04-01 21:11:05 -07005375 * code much cleaner.
5376 *
Mike Kravetz0bf7b642020-10-13 16:56:42 -07005377 * This routine must be called with i_mmap_rwsem held in at least read mode if
5378 * sharing is possible. For hugetlbfs, this prevents removal of any page
5379 * table entries associated with the address space. This is important as we
5380 * are setting up sharing based on existing page table entries (mappings).
5381 *
5382 * NOTE: This routine is only called from huge_pte_alloc. Some callers of
5383 * huge_pte_alloc know that sharing is not possible and do not take
5384 * i_mmap_rwsem as a performance optimization. This is handled by the
5385 * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
5386 * only required for subsequent processing.
Steve Capper3212b532013-04-23 12:35:02 +01005387 */
5388pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5389{
5390 struct vm_area_struct *vma = find_vma(mm, addr);
5391 struct address_space *mapping = vma->vm_file->f_mapping;
5392 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5393 vma->vm_pgoff;
5394 struct vm_area_struct *svma;
5395 unsigned long saddr;
5396 pte_t *spte = NULL;
5397 pte_t *pte;
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08005398 spinlock_t *ptl;
Steve Capper3212b532013-04-23 12:35:02 +01005399
5400 if (!vma_shareable(vma, addr))
5401 return (pte_t *)pmd_alloc(mm, pud, addr);
5402
Mike Kravetz0bf7b642020-10-13 16:56:42 -07005403 i_mmap_assert_locked(mapping);
Steve Capper3212b532013-04-23 12:35:02 +01005404 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5405 if (svma == vma)
5406 continue;
5407
5408 saddr = page_table_shareable(svma, vma, addr, idx);
5409 if (saddr) {
Punit Agrawal7868a202017-07-06 15:39:42 -07005410 spte = huge_pte_offset(svma->vm_mm, saddr,
5411 vma_mmu_pagesize(svma));
Steve Capper3212b532013-04-23 12:35:02 +01005412 if (spte) {
5413 get_page(virt_to_page(spte));
5414 break;
5415 }
5416 }
5417 }
5418
5419 if (!spte)
5420 goto out;
5421
Aneesh Kumar K.V8bea8052016-12-12 16:41:59 -08005422 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08005423 if (pud_none(*pud)) {
Steve Capper3212b532013-04-23 12:35:02 +01005424 pud_populate(mm, pud,
5425 (pmd_t *)((unsigned long)spte & PAGE_MASK));
Kirill A. Shutemovc17b1f42016-06-24 14:49:51 -07005426 mm_inc_nr_pmds(mm);
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08005427 } else {
Steve Capper3212b532013-04-23 12:35:02 +01005428 put_page(virt_to_page(spte));
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08005429 }
Kirill A. Shutemovcb900f42013-11-14 14:31:02 -08005430 spin_unlock(ptl);
Steve Capper3212b532013-04-23 12:35:02 +01005431out:
5432 pte = (pte_t *)pmd_alloc(mm, pud, addr);
Steve Capper3212b532013-04-23 12:35:02 +01005433 return pte;
5434}
5435
5436/*
5437 * unmap huge page backed by shared pte.
5438 *
5439 * The hugetlb pte page is ref counted at the time of mapping. If the pte is
5440 * shared, as indicated by page_count > 1, unmap is achieved by clearing the
5441 * pud and decrementing the ref count. If count == 1, the pte page is not shared.
5442 *
Mike Kravetzc0d03812020-04-01 21:11:05 -07005443 * Called with page table lock held and i_mmap_rwsem held in write mode.
Steve Capper3212b532013-04-23 12:35:02 +01005444 *
5445 * returns: 1 successfully unmapped a shared pte page
5446 * 0 the underlying pte page is not shared, or it is the last user
5447 */
Mike Kravetz34ae2042020-08-11 18:31:38 -07005448int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5449 unsigned long *addr, pte_t *ptep)
Steve Capper3212b532013-04-23 12:35:02 +01005450{
5451 pgd_t *pgd = pgd_offset(mm, *addr);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005452 p4d_t *p4d = p4d_offset(pgd, *addr);
5453 pud_t *pud = pud_offset(p4d, *addr);
Steve Capper3212b532013-04-23 12:35:02 +01005454
Mike Kravetz34ae2042020-08-11 18:31:38 -07005455 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
Steve Capper3212b532013-04-23 12:35:02 +01005456 BUG_ON(page_count(virt_to_page(ptep)) == 0);
5457 if (page_count(virt_to_page(ptep)) == 1)
5458 return 0;
5459
5460 pud_clear(pud);
5461 put_page(virt_to_page(ptep));
Kirill A. Shutemovdc6c9a32015-02-11 15:26:50 -08005462 mm_dec_nr_pmds(mm);
Steve Capper3212b532013-04-23 12:35:02 +01005463 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
5464 return 1;
5465}
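
/*
 * Typical caller pattern, as in hugetlb_change_protection() earlier in this
 * file (sketch):
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	if (huge_pmd_unshare(mm, vma, &address, ptep)) {
 *		spin_unlock(ptl);
 *		// the pud was cleared; flush the whole shared range afterwards
 *		continue;
 *	}
 */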
Steve Capper9e5fc742013-04-30 08:02:03 +01005466#define want_pmd_share() (1)
5467#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5468pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5469{
5470 return NULL;
5471}
Zhang Zhene81f2d22015-06-24 16:56:13 -07005472
Mike Kravetz34ae2042020-08-11 18:31:38 -07005473int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5474 unsigned long *addr, pte_t *ptep)
Zhang Zhene81f2d22015-06-24 16:56:13 -07005475{
5476 return 0;
5477}
Mike Kravetz017b1662018-10-05 15:51:29 -07005478
5479void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5480 unsigned long *start, unsigned long *end)
5481{
5482}
Steve Capper9e5fc742013-04-30 08:02:03 +01005483#define want_pmd_share() (0)
Steve Capper3212b532013-04-23 12:35:02 +01005484#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5485
Steve Capper9e5fc742013-04-30 08:02:03 +01005486#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5487pte_t *huge_pte_alloc(struct mm_struct *mm,
5488 unsigned long addr, unsigned long sz)
5489{
5490 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005491 p4d_t *p4d;
Steve Capper9e5fc742013-04-30 08:02:03 +01005492 pud_t *pud;
5493 pte_t *pte = NULL;
5494
5495 pgd = pgd_offset(mm, addr);
Kirill A. Shutemovf4f0a3d2017-11-29 16:11:30 -08005496 p4d = p4d_alloc(mm, pgd, addr);
5497 if (!p4d)
5498 return NULL;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005499 pud = pud_alloc(mm, p4d, addr);
Steve Capper9e5fc742013-04-30 08:02:03 +01005500 if (pud) {
5501 if (sz == PUD_SIZE) {
5502 pte = (pte_t *)pud;
5503 } else {
5504 BUG_ON(sz != PMD_SIZE);
5505 if (want_pmd_share() && pud_none(*pud))
5506 pte = huge_pmd_share(mm, addr, pud);
5507 else
5508 pte = (pte_t *)pmd_alloc(mm, pud, addr);
5509 }
5510 }
Michal Hocko4e666312016-08-02 14:02:34 -07005511 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
Steve Capper9e5fc742013-04-30 08:02:03 +01005512
5513 return pte;
5514}
5515
Punit Agrawal9b19df22017-09-06 16:21:01 -07005516/*
5517 * huge_pte_offset() - Walk the page table to resolve the hugepage
5518 * entry at address @addr
5519 *
Li Xinhai8ac0b812020-06-03 16:00:53 -07005520 * Return: Pointer to page table entry (PUD or PMD) for
5521 * address @addr, or NULL if a !p*d_present() entry is encountered and the
Punit Agrawal9b19df22017-09-06 16:21:01 -07005522 * size @sz doesn't match the hugepage size at this level of the page
5523 * table.
5524 */
Punit Agrawal7868a202017-07-06 15:39:42 -07005525pte_t *huge_pte_offset(struct mm_struct *mm,
5526 unsigned long addr, unsigned long sz)
Steve Capper9e5fc742013-04-30 08:02:03 +01005527{
5528 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005529 p4d_t *p4d;
Li Xinhai8ac0b812020-06-03 16:00:53 -07005530 pud_t *pud;
5531 pmd_t *pmd;
Steve Capper9e5fc742013-04-30 08:02:03 +01005532
5533 pgd = pgd_offset(mm, addr);
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005534 if (!pgd_present(*pgd))
5535 return NULL;
5536 p4d = p4d_offset(pgd, addr);
5537 if (!p4d_present(*p4d))
5538 return NULL;
Punit Agrawal9b19df22017-09-06 16:21:01 -07005539
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005540 pud = pud_offset(p4d, addr);
Li Xinhai8ac0b812020-06-03 16:00:53 -07005541 if (sz == PUD_SIZE)
5542 /* must be pud huge, non-present or none */
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005543 return (pte_t *)pud;
Li Xinhai8ac0b812020-06-03 16:00:53 -07005544 if (!pud_present(*pud))
5545 return NULL;
5546 /* must have a valid entry and size to go further */
Punit Agrawal9b19df22017-09-06 16:21:01 -07005547
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03005548 pmd = pmd_offset(pud, addr);
Li Xinhai8ac0b812020-06-03 16:00:53 -07005549 /* must be pmd huge, non-present or none */
5550 return (pte_t *)pmd;
Steve Capper9e5fc742013-04-30 08:02:03 +01005551}
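
/*
 * Usage sketch, mirroring follow_hugetlb_page() earlier in this file:
 *
 *	pte = huge_pte_offset(mm, addr & huge_page_mask(h), huge_page_size(h));
 *	if (pte) {
 *		ptl = huge_pte_lock(h, mm, pte);
 *		// inspect huge_ptep_get(pte) under the lock
 *		spin_unlock(ptl);
 *	}
 */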
5552
Naoya Horiguchi61f77ed2015-02-11 15:25:15 -08005553#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5554
5555/*
5556 * These functions can be overridden if your architecture needs its own
5557 * behavior.
5558 */
5559struct page * __weak
5560follow_huge_addr(struct mm_struct *mm, unsigned long address,
5561 int write)
5562{
5563 return ERR_PTR(-EINVAL);
5564}
5565
5566struct page * __weak
Aneesh Kumar K.V4dc71452017-07-06 15:38:56 -07005567follow_huge_pd(struct vm_area_struct *vma,
5568 unsigned long address, hugepd_t hpd, int flags, int pdshift)
5569{
5570 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5571 return NULL;
5572}
5573
5574struct page * __weak
Steve Capper9e5fc742013-04-30 08:02:03 +01005575follow_huge_pmd(struct mm_struct *mm, unsigned long address,
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005576 pmd_t *pmd, int flags)
Steve Capper9e5fc742013-04-30 08:02:03 +01005577{
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005578 struct page *page = NULL;
5579 spinlock_t *ptl;
Naoya Horiguchic9d398f2017-03-31 15:11:55 -07005580 pte_t pte;
John Hubbard3faa52c2020-04-01 21:05:29 -07005581
5582 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
5583 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
5584 (FOLL_PIN | FOLL_GET)))
5585 return NULL;
5586
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005587retry:
5588 ptl = pmd_lockptr(mm, pmd);
5589 spin_lock(ptl);
5590 /*
5591 * make sure that the address range covered by this pmd is not
5592	 * unmapped by other threads.
5593 */
5594 if (!pmd_huge(*pmd))
5595 goto out;
Naoya Horiguchic9d398f2017-03-31 15:11:55 -07005596 pte = huge_ptep_get((pte_t *)pmd);
5597 if (pte_present(pte)) {
Gerald Schaefer97534122015-04-14 15:42:30 -07005598 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
John Hubbard3faa52c2020-04-01 21:05:29 -07005599 /*
5600 * try_grab_page() should always succeed here, because: a) we
5601 * hold the pmd (ptl) lock, and b) we've just checked that the
5602 * huge pmd (head) page is present in the page tables. The ptl
5603 * prevents the head page and tail pages from being rearranged
5604 * in any way. So this page must be available at this point,
5605 * unless the page refcount overflowed:
5606 */
5607 if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5608 page = NULL;
5609 goto out;
5610 }
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005611 } else {
Naoya Horiguchic9d398f2017-03-31 15:11:55 -07005612 if (is_hugetlb_entry_migration(pte)) {
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005613 spin_unlock(ptl);
5614 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
5615 goto retry;
5616 }
5617 /*
5618 * hwpoisoned entry is treated as no_page_table in
5619 * follow_page_mask().
5620 */
5621 }
5622out:
5623 spin_unlock(ptl);
Steve Capper9e5fc742013-04-30 08:02:03 +01005624 return page;
5625}
5626
Naoya Horiguchi61f77ed2015-02-11 15:25:15 -08005627struct page * __weak
Steve Capper9e5fc742013-04-30 08:02:03 +01005628follow_huge_pud(struct mm_struct *mm, unsigned long address,
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005629 pud_t *pud, int flags)
Steve Capper9e5fc742013-04-30 08:02:03 +01005630{
John Hubbard3faa52c2020-04-01 21:05:29 -07005631 if (flags & (FOLL_GET | FOLL_PIN))
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005632 return NULL;
Steve Capper9e5fc742013-04-30 08:02:03 +01005633
Naoya Horiguchie66f17f2015-02-11 15:25:22 -08005634 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
Steve Capper9e5fc742013-04-30 08:02:03 +01005635}
5636
Anshuman Khandualfaaa5b62017-07-06 15:38:50 -07005637struct page * __weak
5638follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5639{
John Hubbard3faa52c2020-04-01 21:05:29 -07005640 if (flags & (FOLL_GET | FOLL_PIN))
Anshuman Khandualfaaa5b62017-07-06 15:38:50 -07005641 return NULL;
5642
5643 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5644}
5645
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005646bool isolate_huge_page(struct page *page, struct list_head *list)
5647{
Naoya Horiguchibcc54222015-04-15 16:14:38 -07005648 bool ret = true;
5649
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005650 spin_lock(&hugetlb_lock);
Muchun Song5b9631c2021-02-04 18:32:10 -08005651 if (!PageHeadHuge(page) || !page_huge_active(page) ||
5652 !get_page_unless_zero(page)) {
Naoya Horiguchibcc54222015-04-15 16:14:38 -07005653 ret = false;
5654 goto unlock;
5655 }
5656 clear_page_huge_active(page);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005657 list_move_tail(&page->lru, list);
Naoya Horiguchibcc54222015-04-15 16:14:38 -07005658unlock:
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005659 spin_unlock(&hugetlb_lock);
Naoya Horiguchibcc54222015-04-15 16:14:38 -07005660 return ret;
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005661}
5662
5663void putback_active_hugepage(struct page *page)
5664{
Sasha Levin309381fea2014-01-23 15:52:54 -08005665 VM_BUG_ON_PAGE(!PageHead(page), page);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005666 spin_lock(&hugetlb_lock);
Naoya Horiguchibcc54222015-04-15 16:14:38 -07005667 set_page_huge_active(page);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07005668 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5669 spin_unlock(&hugetlb_lock);
5670 put_page(page);
5671}
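
/*
 * Assumed migration flow for the two helpers above (sketch, not quoted from
 * a specific caller): isolate the huge page onto a private list, pass the
 * list to migrate_pages(), and put back anything that was not migrated:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_huge_page(page, &pagelist)) {
 *		// new_page is a hypothetical new_page_t allocation callback
 *		err = migrate_pages(&pagelist, new_page, NULL, 0,
 *				    MIGRATE_SYNC, MR_MEMORY_FAILURE);
 *		if (err)
 *			putback_movable_pages(&pagelist);
 *	}
 */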
Michal Hockoab5ac902018-01-31 16:20:48 -08005672
5673void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5674{
5675 struct hstate *h = page_hstate(oldpage);
5676
5677 hugetlb_cgroup_migrate(oldpage, newpage);
5678 set_page_owner_migrate_reason(newpage, reason);
5679
5680 /*
5681	 * Transfer the temporary state of the new huge page. This is the
5682	 * reverse of other transitions because the newpage is going to
5683	 * be final while the old one will be freed, so it takes over
5684	 * the temporary status.
5685 *
5686 * Also note that we have to transfer the per-node surplus state
5687 * here as well otherwise the global surplus count will not match
5688 * the per-node's.
5689 */
5690 if (PageHugeTemporary(newpage)) {
5691 int old_nid = page_to_nid(oldpage);
5692 int new_nid = page_to_nid(newpage);
5693
5694 SetPageHugeTemporary(oldpage);
5695 ClearPageHugeTemporary(newpage);
5696
5697 spin_lock(&hugetlb_lock);
5698 if (h->surplus_huge_pages_node[old_nid]) {
5699 h->surplus_huge_pages_node[old_nid]--;
5700 h->surplus_huge_pages_node[new_nid]++;
5701 }
5702 spin_unlock(&hugetlb_lock);
5703 }
5704}
Roman Gushchincf11e852020-04-10 14:32:45 -07005705
5706#ifdef CONFIG_CMA
Roman Gushchincf11e852020-04-10 14:32:45 -07005707static bool cma_reserve_called __initdata;
5708
5709static int __init cmdline_parse_hugetlb_cma(char *p)
5710{
5711 hugetlb_cma_size = memparse(p, &p);
5712 return 0;
5713}
5714
5715early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
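
/*
 * Boot-time usage example: passing "hugetlb_cma=4G" on the kernel command
 * line requests 4G of CMA for gigantic pages; hugetlb_cma_reserve() below
 * then splits that amount evenly across the online NUMA nodes.
 */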
5716
5717void __init hugetlb_cma_reserve(int order)
5718{
5719 unsigned long size, reserved, per_node;
5720 int nid;
5721
5722 cma_reserve_called = true;
5723
5724 if (!hugetlb_cma_size)
5725 return;
5726
5727 if (hugetlb_cma_size < (PAGE_SIZE << order)) {
5728 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
5729 (PAGE_SIZE << order) / SZ_1M);
5730 return;
5731 }
5732
5733 /*
5734	 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
5735	 * allocate 1 GB on each of the first three nodes and ignore the last one.
5736 */
5737 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
5738 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
5739 hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
5740
5741 reserved = 0;
5742 for_each_node_state(nid, N_ONLINE) {
5743 int res;
Barry Song2281f792020-08-24 11:03:09 +12005744 char name[CMA_MAX_NAME];
Roman Gushchincf11e852020-04-10 14:32:45 -07005745
5746 size = min(per_node, hugetlb_cma_size - reserved);
5747 size = round_up(size, PAGE_SIZE << order);
5748
Barry Song2281f792020-08-24 11:03:09 +12005749 snprintf(name, sizeof(name), "hugetlb%d", nid);
Roman Gushchincf11e852020-04-10 14:32:45 -07005750 res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
Barry Song29d0f412020-08-11 18:32:00 -07005751 0, false, name,
Roman Gushchincf11e852020-04-10 14:32:45 -07005752 &hugetlb_cma[nid], nid);
5753 if (res) {
5754 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
5755 res, nid);
5756 continue;
5757 }
5758
5759 reserved += size;
5760 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
5761 size / SZ_1M, nid);
5762
5763 if (reserved >= hugetlb_cma_size)
5764 break;
5765 }
5766}
5767
5768void __init hugetlb_cma_check(void)
5769{
5770 if (!hugetlb_cma_size || cma_reserve_called)
5771 return;
5772
5773 pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
5774}
5775
5776#endif /* CONFIG_CMA */