/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

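/* The TSB is a direct-mapped cache of TLB translations: a virtual
 * address selects exactly one slot.  nentries is always a power of
 * two, so the mask below extracts the low-order index bits.
 */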
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

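/* Each TSB entry stores, as its tag, the virtual address shifted
 * down by 22 bits; a lookup hits only on an exact tag match.
 */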
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

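/* Invalidate every kernel TSB entry whose mapped virtual address lies
 * in [start, end).  The address is reconstructed from the slot index
 * (shifted up by 13, the 8K page shift the kernel TSB is hashed on)
 * OR'd with the stored tag shifted back up by 22.
 */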
static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
{
	unsigned long idx;

	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
		struct tsb *ent = &swapper_tsb[idx];
		unsigned long match = idx << 13;

		match |= (ent->tag << 22);
		if (match >= start && match < end)
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

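	/* For ranges spanning at least twice the TSB capacity it is
	 * cheaper to sweep the whole TSB once than to probe it for
	 * every page in the range.
	 */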
	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
		return flush_tsb_kernel_range_scan(start, end);

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

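	/* The TLB batching code uses bit 0 of the batched address as
	 * a flag (marking executable mappings), so mask it off before
	 * the address is hashed.
	 */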
	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
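/* A huge page covers 1 << (hugepage_shift - hash_shift) consecutive
 * base-sized steps, each hashing to its own TSB slot, so every one of
 * them must be flushed to remove the mapping.
 */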
static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
				       unsigned long hash_shift,
				       unsigned long nentries,
				       unsigned int hugepage_shift)
{
	unsigned int hpage_entries;
	unsigned int i;

	hpage_entries = 1 << (hugepage_shift - hash_shift);
	for (i = 0; i < hpage_entries; i++)
		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
				      nentries);
}

static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
				 unsigned long tsb, unsigned long nentries,
				 unsigned int hugepage_shift)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_huge_tsb_one_entry(tb, tb->vaddrs[i], hash_shift,
					   nentries, hugepage_shift);
}
#endif

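/* Flush a batch of user virtual addresses from this mm's TSBs.  Base
 * page sized mappings live in the MM_TSB_BASE TSB, huge mappings in
 * the MM_TSB_HUGE TSB.  On cheetah_plus and hypervisor chips the TSB
 * is referenced by physical address, hence the __pa() conversion.
 */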
void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (tb->hugepage_shift == PAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
				     tb->hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (hugepage_shift == PAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
					   nentries, hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
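	/* The low three bits of tsb_reg encode the TSB size as
	 * log2(tsb_bytes / 8192); tsb_destroy_one() reads this field
	 * back via (tsb_reg_val & 0x7UL) to find the backing cache.
	 */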
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
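		/* Match the 8K colour of the virtual mapping to the
		 * physical address so D-cache aliases line up.
		 */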
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
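		/* Each TSB entry is 16 bytes: an 8-byte tag followed
		 * by an 8-byte TTE.
		 */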
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

struct kmem_cache *pgtable_cache __read_mostly;

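/* One kmem cache per supported TSB size, 8K << 0 through 8K << 7.
 * Each cache is created with alignment equal to its object size,
 * giving the physically contiguous, size-aligned memory a TSB needs.
 */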
static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

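/* Map a candidate TSB size to the RSS at which we move up to the next
 * size.  A negative ratio places the limit below full capacity and a
 * positive one above it; the default of -2 yields
 * num_ents - num_ents/4, i.e. growth triggers at 3/4 occupancy.
 */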
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

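	/* Pick the smallest TSB size whose RSS limit still exceeds
	 * the current RSS; new_cache_index selects the matching kmem
	 * cache.
	 */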
	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb. */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long saved_hugetlb_pte_count;
	unsigned long saved_thp_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset them to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
	saved_thp_pte_count = mm->context.thp_pte_count;
	mm->context.hugetlb_pte_count = 0;
	mm->context.thp_pte_count = 0;

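	/* THP mappings are accounted in the mm's RSS at small page
	 * granularity but are serviced by the huge TSB, so subtract
	 * them from the RSS used to size the base TSB below.
	 */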
	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, MM_TSB_BASE, mm_rss);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
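	/* Size the huge TSB by its entry count: each huge page is
	 * entered as REAL_HPAGE_PER_HPAGE real hardware huge pages.
	 */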
	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
		tsb_grow(mm, MM_TSB_HUGE,
			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
			 REAL_HPAGE_PER_HPAGE);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

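/* Tear down all TSBs of an exiting address space and return its MMU
 * context ID, if one was allocated, to the global bitmap.
 */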
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}