/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
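
/*
 * Illustrative sketch (not part of the kernel API): with the [from, to)
 * convention above, the pages covered by a region list are the sum of
 * (to - from) over its entries.  A hypothetical helper, called with
 * resv->lock held, could look like:
 *
 *	long region_pages(struct resv_map *resv)
 *	{
 *		struct file_region *rg;
 *		long pages = 0;
 *
 *		list_for_each_entry(rg, &resv->regions, link)
 *			pages += rg->to - rg->from;
 *		return pages;
 *	}
 */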

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
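
/*
 * Example (illustrative only): for_each_hstate() visits every registered
 * huge page size, e.g. to total up the base pages held in all pools:
 *
 *	struct hstate *h;
 *	unsigned long pages = 0;
 *
 *	for_each_hstate(h)
 *		pages += h->nr_huge_pages << h->order;
 */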

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
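
/*
 * Usage sketch: a subpool is typically created per hugetlbfs superblock and
 * dropped at unmount, roughly as below (treating a NULL return as failure
 * is an assumption here, not a documented contract):
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 *
 * See hugetlbfs_sb_info::spool below for the per-superblock placement.
 */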

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
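
/*
 * Illustrative pairing (a sketch of the expected pattern, not a stated
 * contract): the hash picks the hugetlb_fault_mutex_table entry that
 * serializes faults on a given (mapping, index) pair:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */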

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
			unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory.  If the arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
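
/*
 * Example (illustrative): page_size_log is log2 of the requested huge page
 * size, e.g. the value carried in the MAP_HUGE_SHIFT field of mmap() flags.
 * hstate_sizelog(21) looks up the hstate for 1UL << 21 (2 MB), while
 * hstate_sizelog(0) selects the default huge page size.
 */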

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

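/*
 * A gigantic hstate has an order at or above MAX_ORDER, i.e. its pages are
 * too large for the buddy allocator and must come from boot-time or CMA
 * reservations (see hugetlb_cma_reserve() below).
 */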
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

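/*
 * Huge page size expressed in 512-byte sectors, the unit used for
 * inode block (i_blocks) accounting.
 */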
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * a movable zone.  Movability of any huge page is required only
 * if the huge page size is supported for migration: there is no
 * reason for a huge page to be movable if it is not migratable
 * to start with.  The huge page must also be large enough to be
 * placed under a movable zone and still feasible to migrate;
 * mere presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it is not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

void set_page_huge_active(struct page *page);

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
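
/*
 * Typical pattern (an illustrative sketch, not a guaranteed contract):
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */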

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */