/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
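/*
 * GFP_PGTABLE_KERNEL yields zeroed pages for kernel page tables, while
 * GFP_PGTABLE_USER additionally charges the allocation to the caller's
 * memory cgroup via __GFP_ACCOUNT.
 */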

/**
 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table page
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

/**
 * __pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct page *pte;

	pte = alloc_page(gfp);
	if (!pte)
		return NULL;
	if (!pgtable_pte_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}

	return pte;
}

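/*
 * A minimal sketch of the intended override pattern (hypothetical
 * architecture code, not part of this header): an architecture whose user
 * page tables must be 32-bit addressable could pick its own GFP flags
 * while reusing the common constructor path:
 *
 *	#define __HAVE_ARCH_PTE_ALLOC_ONE
 *	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 *	{
 *		return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_DMA32);
 *	}
 */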
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pgtable_pte_page_dtor(pte_page);
	__free_page(pte_page);
}

#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate a page for PMD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pmd_page_ctor().
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
#endif

#ifndef __HAVE_ARCH_PMD_FREE
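/**
 * pmd_free - free PMD-level page table page
 * @mm: the mm_struct of the current context
 * @pmd: pointer to the memory containing the page table
 */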
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page using %GFP_PGTABLE_USER for user context and
 * %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	return (pud_t *)get_zeroed_page(gfp);
}
#endif

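/**
 * pud_free - free PUD-level page table page
 * @mm: the mm_struct of the current context
 * @pud: pointer to the memory containing the page table
 */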
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#ifndef __HAVE_ARCH_PGD_FREE
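/**
 * pgd_free - free PGD-level page table page
 * @mm: the mm_struct of the current context
 * @pgd: pointer to the memory containing the page table
 */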
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */