/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

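/* Top-down variant used when the mm has a top-down layout: search
 * below mm->mmap_base for a huge-page aligned hole.  Only 32-bit
 * tasks reach this path; on failure it falls back to a bottom-up
 * scan of the 32-bit address space.
 */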
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

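/* Arch hook for hugetlbfs mmap(): reject lengths that are not
 * huge-page aligned or exceed the task's address space, honour
 * MAP_FIXED (after prepare_hugepage_range() validation) and any
 * usable address hint, then delegate to the bottom-up or top-down
 * helper depending on the mm's layout.
 */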
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

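/* Huge PTEs live at the PMD level on sparc64, so walk (and allocate,
 * if needed) the pgd and pud, then return the pmd slot cast to a
 * pte_t pointer.  The size argument is unused here.
 */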
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *)pmd_alloc(mm, pud, addr);

        return pte;
}

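/* Non-allocating lookup of the PMD-level huge PTE mapping addr;
 * returns NULL if no pgd or pud entry exists yet.
 */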
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud))
                        pte = (pte_t *)pmd_offset(pud, addr);
        }
        return pte;
}

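/* Install a huge PTE.  The per-mm hugetlb_pte_count is bumped when a
 * present entry replaces a non-present one, and since the huge page
 * is backed by REAL_HPAGE_SIZE TTEs, a TLB flush is batched for both
 * halves of the mapping.
 */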
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        pte_t orig;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.hugetlb_pte_count++;

        addr &= HPAGE_MASK;
        orig = *ptep;
        *ptep = entry;

        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
        maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
        maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
}

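/* Read and clear a huge PTE, dropping the per-mm hugetlb_pte_count
 * when a present entry goes away and batching TLB flushes for both
 * REAL_HPAGE_SIZE halves of the mapping.
 */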
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.hugetlb_pte_count--;

        addr &= HPAGE_MASK;
        *ptep = __pte(0UL);

        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
        maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
        maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);

        return entry;
}

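/* A non-none PMD is treated as huge unless it is simply a valid
 * pointer to a PTE table, i.e. unless its (_PAGE_VALID|_PAGE_PMD_HUGE)
 * bits are exactly _PAGE_VALID.
 */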
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

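/* No PUD-level huge pages on sparc64. */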
int pud_huge(pud_t pud)
{
        return 0;
}

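/* Clear a PMD that points at a PTE table and free that table back
 * through the mmu_gather, keeping the mm's PTE-page count in sync.
 */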
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        atomic_long_dec(&tlb->mm->nr_ptes);
}

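/* Walk the PMDs under a PUD: huge PMDs have no PTE table and are just
 * cleared, regular PMDs get their PTE table freed.  If the whole
 * range covered by the PUD entry lies within floor/ceiling, the PMD
 * page itself is freed as well.
 */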
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                if (is_hugetlb_pmd(*pmd))
                        pmd_clear(pmd);
                else
                        hugetlb_free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

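/* Walk the PUDs under a PGD, handing each present entry to the PMD
 * helper above, then free the PUD page if the entire PGD-entry range
 * lies within floor/ceiling.
 */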
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                       ceiling);
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}

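/* Tear down the page tables covering a hugetlb range: walk the PGDs
 * and hand each present entry to the helpers above, which know that
 * huge mappings live at the PMD level and have no PTE table to free.
 */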
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}