/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
			 __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
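/*
 * Allocate a hugepte page table and hook it up behind the hugepd entry
 * (or the group of entries, when one huge page spans several directory
 * slots).  Returns 0 on success or -ENOMEM on failure.
 */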
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	if (!cachep) {
		WARN_ONCE(1, "No page table cache created for hugetlb tables");
		return -ENOMEM;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}
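/*
 * Walk (and allocate where needed) the page table levels down to the
 * level implied by the huge page size, and return a pointer to the slot
 * that will hold the huge PTE.  Depending on the size, that slot is a
 * PGD/PUD/PMD entry itself, a PTE (on 8xx), or lives in a hugepd table.
 */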
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	p4d_t *p4;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);
	p4 = p4d_offset(pg, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) p4;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
		return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}
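/*
 * Hand one of the gigantic pages recorded at boot over to the generic
 * hugetlb code by putting its huge_bootmem_page descriptor on the
 * huge_boot_pages list.  Returns 1 if a page was provided, 0 if none
 * are left.
 */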
static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}
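/*
 * hugepd_free() cannot hand hugepte tables straight back to the allocator
 * while other threads of the mm may still be walking them locklessly, so
 * the tables are batched per cpu and freed through RCU; mms that are only
 * visible to the current thread take the direct path.
 */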
#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

/* Return true when the entry to be freed maps more than the area being freed */
static bool range_is_outside_limits(unsigned long start, unsigned long end,
				    unsigned long floor, unsigned long ceiling,
				    unsigned long mask)
{
	if ((start & mask) < floor)
		return true;
	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return true;
	}
	return end - 1 > ceiling - 1;
}
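/*
 * Clear the hugepd entry (or the whole group of entries covering one
 * huge page) and free the hugepte table it pointed to, unless the table
 * also maps addresses outside the floor..ceiling window being torn down.
 */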
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	if (range_is_outside_limits(start, end, floor, ceiling, pdmask))
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pgtable_t token = pmd_pgtable(*pmd);

	if (range_is_outside_limits(addr, end, floor, ceiling, PMD_MASK))
		return;

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
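/*
 * Free everything hanging off the PMD entries under one PUD: regular PTE
 * tables (8xx), hugepte tables behind hugepds, and finally the PMD table
 * itself when the whole PUD range lies inside floor..ceiling.
 */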
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			if (pmd_none_or_clear_bad(pmd))
				continue;

			/*
			 * if it is not hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

			hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);

			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	if (range_is_outside_limits(start, end, floor, ceiling, PUD_MASK))
		return;

	pmd = pmd_offset(pud, start & PUD_MASK);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start & PUD_MASK);
	mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(p4d, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	if (range_is_outside_limits(start, end, floor, ceiling, PGDIR_MASK))
		return;

	pud = pud_offset(p4d, start & PGDIR_MASK);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start & PGDIR_MASK);
	mm_dec_nr_puds(tlb->mm);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		p4d = p4d_offset(pgd, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (p4d_none_or_clear_bad(p4d))
				continue;
			hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
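/*
 * Lookup helper for hugepd-mapped huge pages: find the huge PTE behind
 * the hugepd under mm->page_table_lock and return the struct page for
 * @address, waiting for a migration entry to be resolved if one is found.
 */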
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock
	 * Use this instead of huge_pte_lockptr
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}
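/*
 * With slices, an unmapped area for a hugetlb mapping must come from a
 * slice of the matching page size; the radix MMU does not use slices and
 * has its own implementation.
 */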
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slice, so derive it from vma*/
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return false;

	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return false;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	return true;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);

	if (!arch_hugetlb_valid_size((unsigned long)size))
		return -EINVAL;

	hugetlb_add_hstate(shift - PAGE_SHIFT);
	return 0;
}
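/*
 * Register every huge page size the MMU supports with the hugetlb core
 * and create the page table caches needed to back hugepd tables for it.
 */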
static int __init hugetlbpage_init(void)
{
	bool configured = false;
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift > shift) {
			if (!IS_ENABLED(CONFIG_PPC_8xx))
				pgtable_cache_add(pdshift - shift);
		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
			   IS_ENABLED(CONFIG_PPC_8xx)) {
			pgtable_cache_add(PTE_T_ORDER);
		}

		configured = true;
	}

	if (configured) {
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
			hugetlbpage_init_default();
	} else
		pr_info("Failed to initialize. Disabling HugeTLB");

	return 0;
}

arch_initcall(hugetlbpage_init);
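/*
 * Reserve a CMA area for gigantic pages at boot: PUD-sized pages with
 * radix, 16G pages on hash when not running under a hypervisor.
 */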
void __init gigantic_hugetlb_cma_reserve(void)
{
	unsigned long order = 0;

	if (radix_enabled())
		order = PUD_SHIFT - PAGE_SHIFT;
	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
		/*
		 * For pseries we do use ibm,expected#pages for reserving 16G pages.
		 */
		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;

	if (order) {
		VM_WARN_ON(order < MAX_ORDER);
		hugetlb_cma_reserve(order);
	}
}
683}