/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

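/*
 * The RIC (Radix Invalidation Control) field of tlbie/tlbiel selects
 * what gets invalidated: only the TLB entries, only the page walk
 * cache (the cached intermediate levels of the radix tree), or both.
 */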
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

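/*
 * tlbiel only invalidates translations cached by the executing
 * processor. With IS = 1 it operates on a single congruence class at
 * a time, so the set number is encoded in RB and callers must loop
 * over every set. The PID goes in the upper 32 bits of RS.
 */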
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	for (set = 0; set < POWER9_TLB_SETS_RADIX; set++) {
		__tlbiel_pid(pid, set, ric);
	}
	/* POWER9 DD1 does not reliably flush the ERAT on tlbiel; do it by hand */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
	return;
}

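/*
 * tlbie broadcasts the invalidation to every processor in the
 * partition. The eieio; tlbsync; ptesync sequence afterwards waits
 * for all processors to complete it.
 */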
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

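/*
 * Virtual-address-based flushes: RB carries the effective address
 * (low 12 bits masked off) together with the AP (actual page size)
 * encoding, so only the entry for that page is invalidated.
 */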
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the TLB entries for the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

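/*
 * The page walk cache (PWC) holds the intermediate (non-leaf) levels
 * of the radix tree, so it has to be flushed separately when page
 * table pages themselves are freed.
 */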
void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);

	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
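/*
 * On SMP we pick between the local tlbiel and the broadcast tlbie
 * based on whether the mm is in use only by the current thread.
 * Processors without MMU_FTR_LOCKLESS_TLBIE can only tolerate one
 * tlbie in flight at a time, so native_tlbie_lock serializes them.
 */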
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */

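/*
 * Kernel mappings live under PID 0, so a kernel range flush simply
 * invalidates everything for PID 0 regardless of start/end.
 */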
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	_tlbie_pid(0, RIC_FLUSH_ALL);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * this is used in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

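/*
 * Called at the end of an mmu_gather batch. If the batch covered a
 * single, known page size we can flush just that range; otherwise
 * fall back to flushing the whole mm.
 */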
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a broadcast tlbie. Just a
 * number at this point, copied from x86 and not yet tuned for Power.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

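/*
 * Flush a range one page at a time, falling back to a single PID-wide
 * flush once the range exceeds the ceiling above.
 */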
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else {
			if (lock_tlbie)
				raw_spin_lock(&native_tlbie_lock);
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
			if (lock_tlbie)
				raw_spin_unlock(&native_tlbie_lock);
		}
	}
err_out:
	preempt_enable();
}

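/*
 * The *_lpid variants below are partition scoped (prs = 0): they
 * operate on guest translations identified by LPID rather than on the
 * current partition's process (PID) translations.
 */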
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

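/*
 * Flush the 2M (PMD-level) mappings covering a range, as used for
 * transparent huge pages.
 */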
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

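/*
 * Flush everything in the TLB: IS = 3 matches all entries. Two tlbies
 * are needed, one for guest-context entries and one for host-context
 * entries (see the comments below).
 */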
void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * Now flush guest entries, by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Now flush host entries, by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the PTE only for POWER9 DD1, so we
	 * can call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & _PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}