/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

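/* The TSB is a direct-mapped table of tag/TTE pairs, always a power
 * of two entries in size.  An address hashes to the slot given by
 * its virtual page number modulo the table size.
 */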
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

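/* TSB tags hold the virtual address shifted right by 22 bits; the
 * bits dropped are covered by the page offset plus the slot index of
 * the minimum-size (512 entry) table.
 */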
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

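/* Invalidate the TSB entry for each address in the mmu_gather batch.
 * The mm's context lock keeps this coherent with tsb_grow() and
 * switch_mm() for this address space.
 */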
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;
	struct tsb *tsb;
	int i;

	spin_lock_irqsave(&mm->context.lock, flags);

	tsb = mm->context.tsb;
	nentries = mm->context.tsb_nentries;

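	/* cheetah_plus and hypervisor chips access the TSB by physical
	 * address (the low-level tsb_flush() code is patched at boot
	 * to use physical ASIs there); older chips use the kernel
	 * virtual address.
	 */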
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

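		/* Bit zero of each batched address is used as a flag
		 * by the TLB batching code; it is not part of the page
		 * address, so mask it off.
		 */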
		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

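	/* The low three bits of the TSB register value encode the
	 * table size: a field value of N selects 512 << N entries,
	 * i.e. 8K << N bytes.
	 */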
	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
		hp->assoc = 1;
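		/* Each TSB entry is 16 bytes: an 8-byte tag followed
		 * by an 8-byte TTE.
		 */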
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * do_sparc64_fault() invokes this routine to try to grow the TSB.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
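 *
 * For example, an 8K TSB holds 512 16-byte entries, so the first grow
 * is triggered once roughly 384 pages are resident.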
 */
void tsb_grow(struct mm_struct *mm, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size, flags;
	struct page *page;
	struct tsb *old_tsb, *new_tsb;
	unsigned long order, new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

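	/* Find the smallest power-of-two table whose 3/4-full
	 * threshold still exceeds the current RSS; if none does,
	 * use the maximum size.
	 */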
	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	if (size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4;

retry_page_alloc:
	order = get_order(size);
	gfp_flags = GFP_KERNEL;
	if (order > 1)
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
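
	/* Multi-page TSB allocations are opportunistic: fail fast and
	 * quietly under memory fragmentation rather than stalling the
	 * fault or fork path here.
	 */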
	page = alloc_pages(gfp_flags, order);
	if (unlikely(!page)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb == NULL && order > 0) {
			size = PAGE_SIZE;
			new_rss_limit = ~0UL;
			goto retry_page_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb != NULL)
			mm->context.tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	new_tsb = page_address(page);
	memset(new_tsb, 0x40, size);
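	/* (0x40 in every byte puts a one in bit 46, which is
	 * TSB_TAG_INVALID_BIT, of each 8-byte tag word.)
	 */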

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(); this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		free_pages((unsigned long) new_tsb, get_order(size));
		return;
	}

	mm->context.tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
	}

	mm->context.tsb = new_tsb;
	setup_tsb_params(mm, size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb. */
		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, get_mm_rss(mm));

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
	unsigned long flags;

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}