/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

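/*
 * The index passed to the TLB-entry invalidate/read routines encodes the
 * way number in its low bits and the entry number within the way starting
 * at bit PAGE_SHIFT (per the entry-address format of the Xtensa MMU
 * option); hence the 'w + (i << PAGE_SHIFT)' construction below.
 */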
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

void flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user
 * mapping, we invalidate the context; when that user mapping is swapped
 * in, a new context will be assigned to it.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	} else {
		mm->context = 0;
	}
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

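/*
 * Flushing a range page by page only pays off while the range covers no
 * more pages than the larger of the two auto-refill TLBs can hold;
 * beyond that, a full flush_tlb_mm() is cheaper.
 */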
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context, start, end);
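	/*
	 * The per-page invalidate routines probe by virtual address and
	 * match only entries tagged with an ASID currently installed in
	 * the RASID register, so temporarily switch RASID to this mm's
	 * ASID and restore the previous value when done.
	 */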
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC) {
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		} else {
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		}
		set_rasid_register(oldpid);
	} else {
		flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

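/*
 * Invalidate the TLB entries for a single page of 'vma', using the same
 * temporary-RASID approach as flush_tlb_range() above.
 */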
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

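/*
 * Software page-table walk: return the raw PTE value that maps 'vaddr' in
 * the current task's address space, or 0 if there is no valid mapping.
 */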
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}

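/*
 * Severity flags accumulated by check_tlb_entry(): check_tlb_sanity()
 * escalates TLB_INSANE to a BUG and TLB_SUSPICIOUS to a WARN.
 */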
enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and that TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked as non-present. A non-present PTE together with a page
 * that has a non-zero refcount and a zero mapcount is normal for a batched
 * TLB flush operation. A zero refcount means that the page was freed
 * prematurely. A non-zero mapcount is unusual, but does not necessarily
 * mean an error; it is therefore marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
				       page_count(p),
				       page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

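/*
 * Scan every auto-refill way/entry of both TLBs with interrupts disabled
 * and report any inconsistency between the TLB contents and the current
 * page tables.
 */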
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */