/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

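/* Flush every virtual address queued in this cpu's batch.  The TSB
 * entries are scrubbed first, then the TLB itself: a batch of one
 * takes the single-page path, anything larger goes through the
 * cross-call (SMP) or local demap loop.
 */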
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

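/* Lazy MMU mode brackets regions where PTE updates may be batched.
 * While ->active is set, tlb_batch_add_one() queues addresses rather
 * than flushing each one synchronously; leaving the mode drains any
 * still-pending entries.
 */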
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

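/* Queue one (mm, vaddr) pair for a deferred flush.  The low bit of
 * the queued address records whether the mapping was executable, so
 * the flush paths know to demap the I-TLB as well.  Outside of lazy
 * MMU mode the page is flushed synchronously instead; the pending
 * batch is drained early whenever the mm or the page size changes,
 * and once TLB_BATCH_NR entries have accumulated.
 */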
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

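/* Called when a present PTE is being overwritten.  A dirty page that
 * still has a file mapping may need its D-cache lines flushed first:
 * on pre-sun4v chips the D-cache is virtually indexed, so a kernel
 * alias that differs from the user address in bit 13 (the 8K colour
 * bit) can hold stale data.  The TLB flush itself is then batched,
 * unless the whole address space is being torn down (fullmm), where
 * the final context flush makes per-page demaps pointless.
 */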
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
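/* The pmd previously pointed at a table of 8K PTEs and is being
 * replaced: walk the old table and queue a flush for every mapping
 * that was still valid.
 */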
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

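/* THP counterpart of set_pte_at().  Besides installing the new pmd,
 * this keeps the per-mm huge-mapping counters (which drive huge-TSB
 * sizing) in sync, and queues flushes for whatever the old pmd
 * mapped: the two REAL_HPAGE_SIZE halves of a huge page, or a scan
 * of the old PTE table.
 */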
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

/*
 * This routine is only called when splitting a THP
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;
}

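/* Deposit/withdraw of the spare PTE table that backs a THP mapping.
 * The spare tables are kept on a per-pmd list threaded through the
 * (currently unused) page-table pages themselves, protected by
 * mm->page_table_lock.
 */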
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */