/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>


#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

bool hugetlb_disabled = false;

unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

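/*
 * Allocate a hugepte table for @hpdp and publish it. On configurations
 * where several higher-level entries cover a single huge page
 * (pshift >= pdshift), every one of those entries is pointed at the same
 * table.
 */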
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = hugepte_cache;
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else {
#ifdef CONFIG_PPC_BOOK3S_64
			*hpdp = __hugepd(__pa(new) |
					 (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
			*hpdp = __hugepd(__pa(new) | _PMD_USER |
					 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
					  _PMD_PAGE_512K) | _PMD_PRESENT);
#else
			/* We use the old format for PPC_FSL_BOOK3E */
			*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
		}
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	}
	spin_unlock(ptl);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#endif

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use a hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= HUGEPD_PGD_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

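/*
 * Hand one firmware-reserved gigantic page to the hugetlb bootmem code.
 * Returns 1 if a page was added to huge_boot_pages, 0 if none are left.
 */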
int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif


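/*
 * Boot-time gigantic page allocation: on pseries LPARs running the hash
 * MMU the pages come from the firmware-provided gpage list above; everywhere
 * else we fall back to the generic bootmem allocator.
 */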
int __init alloc_bootmem_huge_page(struct hstate *h)
{

#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}

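/*
 * On FSL Book3E and 8xx the hugepte tables may still be referenced by
 * concurrent lockless page table walkers, so instead of freeing them
 * immediately we batch them per CPU and release them from an RCU callback
 * once a grace period has passed.
 */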
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

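/*
 * Queue a hugepte table for RCU-deferred freeing; free it immediately when
 * the mm has a single user or is only active on the current thread.
 */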
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

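/*
 * Free the hugepte table referenced by a hugepd entry and clear every
 * higher-level slot that points at it, provided the range being torn down
 * covers the whole table (floor/ceiling checks as in free_pgd_range()).
 */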
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}

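/*
 * Walk the PMD level of a hugepage range, freeing the hugepd tables it
 * contains, then free the PMD page itself if the floor/ceiling limits
 * allow it.
 */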
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should already
			 * find it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

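/*
 * Same as above, one level up: walk the PUD level, handling both normal
 * PMD tables and hugepd entries stored directly at the PUD level.
 */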
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't use the optimization
	 * from the normal page free_pgd_range() here, of checking
	 * whether we're actually covering a large enough range to
	 * have to do anything at the top level of the walk instead
	 * of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

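/*
 * follow_page() helper for hugepd-mapped ranges: look up the page backing
 * @address under mm->page_table_lock, taking a reference when FOLL_GET is
 * set and waiting for migration entries to resolve.
 */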
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * Hugepage directory entries are protected by mm->page_table_lock.
	 * Use it instead of huge_pte_lockptr().
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

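/*
 * Fast GUP over a hugepd entry: iterate over each huge pte it covers and
 * hand the work to gup_hugepte().
 */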
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

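/*
 * Report the base page size used for a VMA: derived from the slice psize
 * on hash, from the VMA itself on radix and on non-slice configurations.
 */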
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	/* With radix we don't use slices, so derive it from the vma */
	if (!radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
#endif
	return vma_kernel_pagesize(vma);
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE)
		return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
	if (!is_power_of_4(size))
		return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
	if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We need to make sure that for different page sizes reported by
	 * firmware we only add hugetlb support for page sizes that can be
	 * supported by the Linux page table layout.
	 * For now we have
	 * Radix: 2M and 1G
	 * Hash: 16M and 16G
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M) {
			if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
			    (mmu_psize != MMU_PAGE_1G))
				return -EINVAL;
		}
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
#endif

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified (%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

struct kmem_cache *hugepte_cache;
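/*
 * Walk mmu_psize_defs[], register every huge page size the current MMU
 * supports, set up the page table caches backing hugepd entries and pick
 * the default huge page size.
 */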
static int __init hugetlbpage_init(void)
{
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;
#endif
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < HUGEPD_PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < HUGEPD_PGD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift have the same value, we don't
		 * use the pgt cache for hugepd.
		 */
		if (pdshift > shift)
			pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
		else if (!hugepte_cache) {
			/*
			 * Create a kmem cache for hugeptes.  The bottom bits in
			 * the pte have size information encoded in them, so
			 * align them to allow this
			 */
			hugepte_cache = kmem_cache_create("hugepte-cache",
							  sizeof(pte_t),
							  HUGEPD_SHIFT_MASK + 1,
							  0, NULL);
			if (hugepte_cache == NULL)
				panic("%s: Unable to create kmem cache "
				      "for hugeptes\n", __func__);

		}
#endif
	}

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
	/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else if (mmu_psize_defs[MMU_PAGE_512K].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
	/* Set default large page size. Currently, we pick 16M, 1M or 2M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
	return 0;
}

arch_initcall(hugetlbpage_init);

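/*
 * Flush the data and instruction caches for every base page making up a
 * huge page, mapping highmem pages temporarily where needed.
 */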
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled. We use this
 * variant when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd  = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none, because
			 * it marks the pmd none and does a hpte invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
				goto out;
			}

			if (pmd_huge(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

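/*
 * Lockless get_user_pages() on one huge pte: grab references to every base
 * page the pte covers, then re-check the pte and back out if it changed
 * under us.
 */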
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);

	if (!pte_access_permitted(pte, write))
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}