// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

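/*
 * GFP flags for page-table pages: zeroed on allocation and charged to the
 * allocating process's memory cgroup; kernel-only allocations strip
 * __GFP_ACCOUNT again below.
 */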
#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

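/*
 * When the kernel pmd is not shared, every pgd that carries its own copy of
 * the kernel mappings is kept on pgd_list (under pgd_lock) so that kernel
 * mapping updates can be propagated to all of them.
 */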
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

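/*
 * Record which mm owns a pgd page by stashing the mm pointer in the pgd
 * page's otherwise unused struct page ->index field, so that code walking
 * pgd_list can map a pgd back to the mm it belongs to.
 */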
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt assumes that the pgd table occupies one whole page, and the
 * 64-bit kernel makes the same assumption.
 *
 * A PAE kernel that is not running as a Xen domain, however, only needs
 * 32 bytes for its pgd rather than a whole page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel runs as a Xen domain, it does not use a shared
	 * kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a shared
	 * kernel pmd, and then the pgd does not need a whole page: 32 bytes
	 * are enough. Create a 32-byte slab cache for pgd allocations at
	 * boot time.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If SHARED_KERNEL_PMD is not set, the PAE kernel is running as a
	 * Xen domain and the pgd needs a whole page.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the kernel pmd is shared, so a 32-byte slab object is
	 * enough for the pgd and saves memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

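/*
 * PGD_ALLOCATION_ORDER accounts for the extra pgd page used for the
 * user-space half of the page tables when page-table isolation is enabled.
 */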
static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		*ptep = entry;

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to a more permissive one. No need to flush the TLB for
		 * that; #PF is architecturally guaranteed to do that and in
		 * the worst case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		*pudp = entry;
		/*
		 * We had a write-protection fault here and changed the pud
		 * to a more permissive one. No need to flush the TLB for
		 * that; #PF is architecturally guaranteed to do that and in
		 * the worst case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

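/*
 * Atomically clear the accessed bit.  The TLB is deliberately not flushed
 * here; see the comment in ptep_clear_flush_young() below for why that is
 * safe.
 */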
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

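/*
 * Install the given pte in the fixmap area and keep count of how many
 * fixmap entries have been set up (reserve_top_address() must run before
 * any of them are).
 */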
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See the comment above pud_set_huge().
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pud_free_pmd_page(pud_t *pud)
{
	pmd_t *pmd;
	int i;

	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pmd[i]))
			return 0;

	pud_clear(pud);
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd)
{
	pte_t *pte;

	if (pmd_none(*pmd))
		return 1;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);
	free_page((unsigned long)pte);

	return 1;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */