blob: c7ec5bb88334eab119ccf78002be2e7679291113 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
H. Peter Anvin1965aae2008-10-22 22:26:29 -07002#ifndef _ASM_X86_PGALLOC_H
3#define _ASM_X86_PGALLOC_H
Jeremy Fitzhardinge4f76cd32008-03-17 16:36:55 -07004
5#include <linux/threads.h>
6#include <linux/mm.h> /* for struct page */
7#include <linux/pagemap.h>
8
Mike Rapoport5fba4af42019-07-11 20:57:49 -07009#define __HAVE_ARCH_PTE_ALLOC_ONE
Mike Rapoportf9cb6542020-08-06 23:22:47 -070010#define __HAVE_ARCH_PGD_FREE
Mike Rapoport1355c312020-08-06 23:22:39 -070011#include <asm-generic/pgalloc.h>
Mike Rapoport5fba4af42019-07-11 20:57:49 -070012
/* Default (non-paravirt) PGD-allocation hook: nothing to do, report success. */
static inline int __paravirt_pgd_alloc(struct mm_struct *mm)
{
	return 0;
}
14
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
/*
 * Without CONFIG_PARAVIRT_XXL the paravirt page-table hooks collapse to
 * no-ops that the compiler can discard.  The alloc hooks receive the pfn
 * of a newly created page-table page; the release hooks receive the pfn
 * of a page-table page that is going away.
 */
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif
31
/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
/* No PTI: a single 4k PGD page suffices. */
#define PGD_ALLOCATION_ORDER 0
#endif
47
/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

/* x86 overrides the generic pte_alloc_one() (__HAVE_ARCH_PTE_ALLOC_ONE). */
extern pgtable_t pte_alloc_one(struct mm_struct *);
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

/* Queue a pte page for freeing via the TLB gather; 'address' is unused. */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
Jeremy Fitzhardinge397f6872008-03-17 16:36:57 -070063
Jeremy Fitzhardinge170fdff2008-03-17 16:36:58 -070064static inline void pmd_populate_kernel(struct mm_struct *mm,
65 pmd_t *pmd, pte_t *pte)
66{
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -070067 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
Jeremy Fitzhardinge170fdff2008-03-17 16:36:58 -070068 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
69}
70
Dan Williams0a9fe8c2018-12-04 13:37:21 -080071static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
72 pmd_t *pmd, pte_t *pte)
73{
74 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
75 set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
76}
77
Jeremy Fitzhardinge170fdff2008-03-17 16:36:58 -070078static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
79 struct page *pte)
80{
81 unsigned long pfn = page_to_pfn(pte);
82
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -070083 paravirt_alloc_pte(mm, pfn);
Jeremy Fitzhardinge170fdff2008-03-17 16:36:58 -070084 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
85}
86
Kirill A. Shutemov98233362015-04-14 15:46:14 -070087#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

/* Queue a pmd page for freeing via the TLB gather; 'address' is unused. */
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}
Jeremy Fitzhardinge170fdff2008-03-17 16:36:58 -070095
Jeremy Fitzhardinge5a5f8f42008-03-17 16:36:59 -070096#ifdef CONFIG_X86_PAE
97extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
98#else /* !CONFIG_X86_PAE */
99static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
100{
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -0700101 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
Jeremy Fitzhardinge5a5f8f42008-03-17 16:36:59 -0700102 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
103}
Dan Williams0a9fe8c2018-12-04 13:37:21 -0800104
105static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
106{
107 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
108 set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
109}
Jeremy Fitzhardinge5a5f8f42008-03-17 16:36:59 -0700110#endif /* CONFIG_X86_PAE */
111
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700112#if CONFIG_PGTABLE_LEVELS > 3
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +0300113static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
Jeremy Fitzhardinge5a5f8f42008-03-17 16:36:59 -0700114{
Jeremy Fitzhardinge2761fa02008-03-17 16:37:02 -0700115 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +0300116 set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
Jeremy Fitzhardinge5a5f8f42008-03-17 16:36:59 -0700117}
118
Dan Williams0a9fe8c2018-12-04 13:37:21 -0800119static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
120{
121 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
122 set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
123}
124
extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

/* Queue a pud page for freeing via the TLB gather; 'address' is unused. */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}
132
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +0300133#if CONFIG_PGTABLE_LEVELS > 4
134static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
135{
Kirill A. Shutemoved7588d2018-05-18 13:35:24 +0300136 if (!pgtable_l5_enabled())
Kirill A. Shutemov98219dd2018-02-14 21:25:40 +0300137 return;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +0300138 paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
139 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
140}
141
Dan Williams0a9fe8c2018-12-04 13:37:21 -0800142static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
143{
144 if (!pgtable_l5_enabled())
145 return;
146 paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
147 set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
148}
149
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +0300150static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
151{
152 gfp_t gfp = GFP_KERNEL_ACCOUNT;
153
154 if (mm == &init_mm)
155 gfp &= ~__GFP_ACCOUNT;
156 return (p4d_t *)get_zeroed_page(gfp);
157}
158
159static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
160{
Andrey Ryabinin0e311d232018-06-25 13:24:27 +0300161 if (!pgtable_l5_enabled())
162 return;
163
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +0300164 BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
165 free_page((unsigned long)p4d);
166}
167
extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

/*
 * Queue a p4d page for freeing via the TLB gather.  Nothing to do unless
 * 5-level paging is enabled at runtime; 'address' is unused.
 */
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}
176
177#endif /* CONFIG_PGTABLE_LEVELS > 4 */
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700178#endif /* CONFIG_PGTABLE_LEVELS > 3 */
179#endif /* CONFIG_PGTABLE_LEVELS > 2 */
Jeremy Fitzhardinge4f76cd32008-03-17 16:36:55 -0700180
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700181#endif /* _ASM_X86_PGALLOC_H */