/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
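
/*
 * Illustrative usage sketch (not part of this header's API):
 * for_each_hstate() visits every registered huge page size, e.g.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu bytes\n", h->name, huge_page_size(h));
 */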

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
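
/*
 * Minimal lifetime sketch, assuming a caller such as a filesystem mount
 * path (hugetlbfs keeps one subpool per superblock); the max/min values
 * here stand in for mount options:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	// on teardown
 */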

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
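
/*
 * Sketch of the intended serialization pattern (mirroring the hugetlb
 * fault and hole-punch paths): hash on the mapping/index pair, then take
 * the corresponding table mutex:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...	// fault in or remove the page at idx
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */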

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
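
/*
 * Worked example for the geometry helpers above, assuming x86-64 2MB huge
 * pages with PAGE_SHIFT == 12: huge_page_order() == 9,
 * huge_page_shift() == 21, huge_page_size() == 2MB,
 * pages_per_huge_page() == 512, and blocks_per_huge_page() == 4096
 * 512-byte blocks.
 */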

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
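
/*
 * Illustrative example: for a 2MB huge page (512 base pages) whose huge
 * page index in its mapping is 3, basepage_index() of the head page is
 * 3 * 512 == 1536, i.e. the same offset expressed in PAGE_SIZE units.
 */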

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it
 * determines whether or not a huge page should be placed in a movable
 * zone. Movability of a huge page is required only if its size is
 * supported for migration; there won't be any reason for the page to be
 * movable if it is not migratable to start with. The huge page also has
 * to be small enough that migrating it out of a movable zone remains
 * feasible; mere presence in a movable zone does not by itself make
 * migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
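
/*
 * Sketch of the start/modify/commit protocol above, as a protection-change
 * path would use it (huge_pte_modify() is assumed from the asm-generic
 * hugetlb helpers): the entry is cleared, modified, then written back:
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */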

void set_page_huge_active(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
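
/*
 * Illustrative locking sketch for the helper above; the caller holds the
 * returned lock while it inspects or updates the huge PTE:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	...	// read or update *ptep
 *	spin_unlock(ptl);
 */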

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */