#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
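/*
 * A resv_map is reference counted through its embedded kref; the final
 * kref_put() invokes resv_map_release(). Illustrative lifetime sketch
 * (caller code and error handling are hypothetical, not part of this
 * header):
 *
 *	struct resv_map *resv = resv_map_alloc();
 *
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_put(&resv->refs, resv_map_release);  (frees on the last ref)
 */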

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
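/*
 * Illustrative use of for_each_hstate() (a sketch, not code from this
 * file): walk every registered huge page size and report its order.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: order %u\n", h->name, huge_page_order(h));
 */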

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
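/*
 * Subpool lifetime sketch (illustrative only; hugetlbfs mounts are the
 * real caller, and max_hpages/min_hpages below are hypothetical values):
 * create a pool capped at max_hpages with min_hpages guaranteed, then
 * drop it when the owner goes away.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */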

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
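/*
 * Sketch of the fault-serialization pattern built on the hash above
 * (mirrors how the fault path uses the table; variable names are
 * illustrative):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault for (mapping, idx) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */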

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
			unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory. If the architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc, allowing for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
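/*
 * Worked example of the accessors above, assuming 4K base pages and a
 * 2M hstate (h->order == 9): huge_page_size() is 4K << 9 == 2M,
 * huge_page_shift() is 9 + 12 == 21, pages_per_huge_page() is 512,
 * and blocks_per_huge_page() is 2M / 512 == 4096 sectors.
 */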

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
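/*
 * Typical guard around hugetlb setup work (a sketch of how callers use
 * hugepages_supported(); the function name below is hypothetical):
 *
 *	static int __init my_hugetlb_init(void)
 *	{
 *		if (!hugepages_supported())
 *			return 0;
 *		...
 *	}
 */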

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	0
#define hugepage_migration_supported(h)	false

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
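/*
 * Illustrative locking sketch using huge_pte_lock() (caller-side code,
 * not part of this header; the ptep lookup shown is the typical pattern):
 *
 *	spinlock_t *ptl;
 *	pte_t *ptep = huge_pte_offset(mm, address);
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge PTE under ptl ...
 *	spin_unlock(ptl);
 */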

#endif /* _LINUX_HUGETLB_H */