// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

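/*
 * The vm.allocate_pgste sysctl forces allocation of full 4K page tables
 * with PGSTEs for all processes. PGSTEs (page status table entries) are
 * required to run KVM guests; without the sysctl they are only allocated
 * for address spaces that request them.
 */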
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

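/*
 * A crst (region or segment) table has 2048 entries of 8 bytes each and
 * therefore occupies four consecutive pages, hence the order 2 allocation.
 */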
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

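/*
 * Grow the address space of @mm to at least @end by stacking additional
 * region tables on top of the current top level table. Each iteration of
 * the loop adds one translation level; afterwards every CPU is asked to
 * reload the new ASCE if it is currently running with this mm.
 */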
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
			mm_inc_nr_puds(mm);
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

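/*
 * Shrink the address space of a 31-bit compat process back to a single
 * segment table, i.e. a 2 GB ASCE.
 */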
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm_dec_nr_pmds(mm);
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

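/*
 * Atomically toggle @bits in @v and return the new value. Used to flip
 * the page table fragment state bits kept in the upper byte of
 * page->_refcount.
 */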
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

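/*
 * Allocate a full 4K page table for a guest mapping: 256 pte entries
 * (set to _PAGE_INVALID) in the lower half of the page, 256 pgste
 * entries (cleared to zero) in the upper half.
 */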
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
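/*
 * A pte table on s390 has 256 entries (2K), so two of them fit into one
 * 4K page. The upper byte of page->_refcount keeps track of the state of
 * the two fragments:
 *
 *   bits 24-25: fragment (lower/upper 2K) is allocated and in use
 *   bits 28-29: fragment was freed with page_table_free_rcu() and waits
 *               for the RCU grace period before it may be reused
 *
 * Pages with at least one free fragment are kept on
 * mm->context.pgtable_list so that page_table_alloc() can hand out the
 * second half before allocating a new page. Page tables with PGSTEs
 * always occupy a full 4K page and have both allocation bits set.
 */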
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_page_dtor(page);
	__free_page(page);
}

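/*
 * Defer freeing of a pte table until after the TLB flush. The fragment
 * index is encoded in the low bits of the table pointer handed to
 * tlb_remove_table(): 0 for a crst table, 1 or 2 for the lower/upper 2K
 * half of a page, 3 for a full 4K page table with PGSTEs. The bits are
 * decoded again in __tlb_remove_table().
 */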
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_page_dtor(page);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

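/*
 * Queue a page table for freeing after the TLB flush. Tables are
 * collected in a mmu_table_batch page and released after an RCU grace
 * period by tlb_table_flush(). If no batch page can be allocated, fall
 * back to the synchronous IPI broadcast in tlb_remove_table_one().
 */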
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

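/*
 * Generate base_<level>_addr_end() helpers, analogous to the generic
 * p?d_addr_end() macros: round @addr up to the next boundary of the
 * given table level, but never beyond @end.
 */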
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

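/*
 * Load Real Address: translate @address via the current page tables and
 * return the resulting real address. The generated base tables thereby
 * reproduce the translation of the regular kernel mapping, just without
 * enhanced DAT features such as large pages.
 */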
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

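/*
 * The walk functions below serve a dual purpose: with @alloc != 0 they
 * populate the table hierarchy for base_asce_alloc(), with @alloc == 0
 * they only descend the existing hierarchy so that base_asce_free() can
 * release the lower level tables.
 */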
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}