/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

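/* Index into a TSB: shift out the in-page offset bits, then mask by
 * the (power-of-two) entry count.  For the base TSB, hash_shift is
 * PAGE_SHIFT (13 on sparc64), so e.g. a 512-entry TSB indexes on
 * vaddr bits [21:13].
 */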
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

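/* A TSB tag holds the virtual address shifted down by 22 bits, the
 * same form in which the TSB miss handlers compute their tag target.
 */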
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

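/* Flush a single entry from a user TSB.  Bit 0 of 'v' is not an
 * address bit; the TLB batching code uses it to mark executable
 * mappings, so it is cleared before hashing.  'tsb' is a physical
 * address on cheetah_plus and hypervisor chips, a virtual one
 * otherwise, matching how the callers computed it.
 */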
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

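/* Flush every address queued in a tlb_batch from the given TSB. */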
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

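/* Like flush_tsb_user(), but for a single address rather than a whole
 * tlb_batch.
 */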
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

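/* Hypervisor page size index/mask for each TSB: the base TSB holds
 * 8K translations, the huge-page TSB 4MB ones.
 */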
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
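	/* The locked-entry page sizes available are 8K, 64K, 512K and
	 * 4MB, so each TSB size rounds up to the next one of those:
	 * an 8K TSB uses an 8K page, 16K-64K use a 64K page, 128K-512K
	 * a 512K page, and 1MB a 4MB page.  The low three bits of
	 * tsb_reg encode the TSB size.
	 */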
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
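	/* Each struct tsb entry is 16 bytes (tag plus TTE), hence the
	 * num_ttes = tsb_bytes / 16 calculation below.
	 */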
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

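/* The TSB kmem caches below are created with alignment equal to their
 * object size, which is what provides the size-alignment that
 * setup_tsb_params() asserts with BUG_ON().
 */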
struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

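/* Map a TSB size to the RSS value at which we grow it.  With the
 * default ratio of -2 this returns num_ents - num_ents/4, i.e. the
 * 3/4-of-capacity trigger described in the comment above tsb_grow().
 */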
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

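/* Free one TSB.  The kmem cache index is recovered from the low three
 * bits of tsb_reg_val, which setup_tsb_params() set to log2(size/8K).
 */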
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}