// SPDX-License-Identifier: GPL-2.0
/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
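
/*
 * Usage sketch (assuming the usual sysctl wiring, not stated in the original
 * source): the table above shows up as /proc/sys/vm/allocate_pgste. Setting
 * it to 1 makes newly created address spaces allocate full 4K page tables
 * with PGSTEs (the mm_alloc_pgste() case in page_table_alloc() below), which
 * running KVM guests on s390 requires.
 */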

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_virt(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that should ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
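
/*
 * A sketch of the limits involved (derived from the checks above, not an
 * authoritative statement): a 3-level scheme, i.e. a region-third table on
 * top, covers user addresses up to _REGION2_SIZE; installing a p4d (4 levels)
 * raises the limit to _REGION1_SIZE; installing a pgd (5 levels) raises it to
 * TASK_SIZE_MAX. crst_table_upgrade() allocates only the tables needed to
 * cover the requested end address.
 */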

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_virt(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * A 2KB-pgtable is either upper or lower half of a normal page.
 * The second half of the page may be unused or used as another
 * 2KB-pgtable.
 *
 * Whenever possible the parent page for a new 2KB-pgtable is picked
 * from the list of partially allocated pages mm_context_t::pgtable_list.
 * In case the list is empty a new parent page is allocated and added to
 * the list.
 *
 * When a parent page gets fully allocated it contains 2KB-pgtables in both
 * upper and lower halves and is removed from mm_context_t::pgtable_list.
 *
 * When a 2KB-pgtable is freed from a fully allocated parent page, that
 * page turns partially allocated and is added to mm_context_t::pgtable_list.
 *
 * If a 2KB-pgtable is freed from a partially allocated parent page, that
 * page turns unused and gets removed from mm_context_t::pgtable_list.
 * Furthermore, the unused parent page is released.
 *
 * As follows from the above, no unallocated or fully allocated parent
 * pages are contained in mm_context_t::pgtable_list.
 *
 * The upper byte (bits 24-31) of the parent page _refcount is used
 * for tracking contained 2KB-pgtables and has the following format:
 *
 *   PP  AA
 * 01234567    upper byte (bits 24-31) of struct page::_refcount
 *   ||  ||
 *   ||  |+--- upper 2KB-pgtable is allocated
 *   ||  +---- lower 2KB-pgtable is allocated
 *   |+------- upper 2KB-pgtable is pending for removal
 *   +-------- lower 2KB-pgtable is pending for removal
 *
 * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
 * using _refcount is possible).
 *
 * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
 * The parent page is either:
 * - added to mm_context_t::pgtable_list in case the second half of the
 *   parent page is still unallocated;
 * - removed from mm_context_t::pgtable_list in case both halves of the
 *   parent page are allocated;
 * These operations are protected with mm_context_t::lock.
 *
 * When 2KB-pgtable is deallocated the corresponding AA bit is set to 0
 * and the corresponding PP bit is set to 1 in a single atomic operation.
 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
 * exclusive and may never be both set to 1!
 * The parent page is either:
 * - added to mm_context_t::pgtable_list in case the second half of the
 *   parent page is still allocated;
 * - removed from mm_context_t::pgtable_list in case the second half of
 *   the parent page is unallocated;
 * These operations are protected with mm_context_t::lock.
 *
 * It is important to understand that mm_context_t::lock only protects
 * mm_context_t::pgtable_list and AA bits, but not the parent page itself
 * and PP bits.
 *
 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
 * while both AA bits and the second PP bit are already unset. Then the
 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
 * also been removed from mm_context_t::pgtable_list. It is therefore safe
 * to release the page.
 *
 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable,
 * while the PP bits are never used, nor is such a page added to or removed
 * from mm_context_t::pgtable_list.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			/*
			 * The pending removal bits must also be checked.
			 * Failure to do so might lead to an impossible
			 * value (e.g. 0x13 or 0x23) being written to
			 * _refcount. Such values violate the assumption
			 * that pending and allocation bits are mutually
			 * exclusive, and the rest of the code goes off the
			 * rails as a result. That could lead to a whole
			 * bunch of races and corruptions.
			 */
			mask = (mask | (mask >> 4)) & 0x03U;
			if (mask != 0x03U) {
				table = (unsigned long *) page_to_virt(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
						0x01U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_virt(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 0x03U << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 0x01U << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
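
/*
 * Illustrative sketch only (not part of the original source, kept under
 * "#if 0" so it is never built): the life cycle of the upper _refcount byte
 * described above, modelled with a plain unsigned int instead of the kernel's
 * atomic_t. It mirrors the XOR transitions performed by page_table_alloc(),
 * page_table_free() and __tlb_remove_table() for a non-PGSTE parent page.
 */
#if 0
static unsigned int pgtable_refcount_byte_example(void)
{
	unsigned int byte = 0x00U;	/* fresh parent page, nothing allocated */

	byte ^= 0x01U;	/* page_table_alloc(): lower 2K fragment -> AA = 01 */
	byte ^= 0x02U;	/* page_table_alloc(): upper 2K fragment -> AA = 11 */
	byte ^= 0x11U;	/* page_table_free(): clear lower A, set lower P (pending) */
	byte ^= 0x10U;	/* page_table_free()/__tlb_remove_table(): clear lower P */
	return byte;	/* 0x02U: only the upper fragment is still in use */
}
#endif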

static void page_table_release_check(struct page *page, void *table,
				     unsigned int half, unsigned int mask)
{
	char msg[128];

	if (!IS_ENABLED(CONFIG_DEBUG_VM) || !mask)
		return;
	snprintf(msg, sizeof(msg),
		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
		 table, half, mask);
	dump_page(page, msg);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned int mask, bit, half;
	struct page *page;

	page = virt_to_page(table);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		/*
		 * Mark the page for delayed release. The actual release
		 * will happen outside of the critical section from this
		 * function or from __tlb_remove_table()
		 */
		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if (mask & 0x03U)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		half = 0x01U << bit;
	} else {
		half = 0x03U;
		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
		mask >>= 24;
	}

	page_table_release_check(page, table, half, mask);
	pgtable_pte_page_dtor(page);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = virt_to_page(table);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) ((unsigned long)table | 0x03U);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	/*
	 * Mark the page for delayed release. The actual release will happen
	 * outside of the critical section from __tlb_remove_table() or from
	 * page_table_free()
	 */
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 0x03U)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
	tlb_remove_table(tlb, table);
}

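/*
 * The low two bits of the pointer handed to tlb_remove_table() encode what
 * is being freed: page_table_free_rcu() above tags 2K fragments with
 * 0x01/0x02 and pgste page tables with 0x03, while untagged (0x00) pointers
 * are crst tables (pmd, pud or p4d) queued elsewhere. __tlb_remove_table()
 * strips that tag again and performs the delayed release accordingly.
 */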
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = virt_to_page(table);

	switch (half) {
	case 0x00U:	/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		return;
	case 0x01U:	/* lower 2K of a 4K page table */
	case 0x02U:	/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		break;
	case 0x03U:	/* 4K page table with pgstes */
		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
		mask >>= 24;
		break;
	}

	page_table_release_check(page, table, half, mask);
	pgtable_pte_page_dtor(page);
	__free_page(page);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = (unsigned long *)__get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	free_pages((unsigned long)table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
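
/*
 * Worked example (not from the original source): the helpers generated above
 * return the smaller of "end" and the next SIZE boundary strictly above
 * "addr". Assuming _SEGMENT_SIZE is 1 MB (0x100000),
 * base_segment_addr_end(0x123456, 0x300000) computes
 * next = (0x123456 + 0x100000) & ~0xfffffUL = 0x200000 and returns 0x200000.
 * The "- 1" in the comparison keeps a wrap of "next" to 0 from being treated
 * as smaller than "end".
 */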

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
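
/*
 * base_lra() above wraps the "load real address" (lra) instruction: it
 * translates the given virtual address through the current DAT setup and
 * returns the backing real address, which base_page_walk() below stores
 * into the basic page table entries.
 */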

static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features such
 * as large pages. This is required for some I/O functions that pass an
 * asce, such as some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. TLB entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
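
/*
 * Illustrative usage sketch (not part of the original source, kept under
 * "#if 0" so it is never built): how a caller would be expected to pair
 * base_asce_alloc() and base_asce_free() for a buffer that has to be
 * described by a basic asce, e.g. for a service call that cannot handle
 * enhanced DAT features. The helper name and the do_io_request() call are
 * made up for the example.
 */
#if 0
static int base_asce_io_example(void *buf, unsigned long num_pages)
{
	unsigned long asce;
	int rc;

	asce = base_asce_alloc((unsigned long) buf, num_pages);
	if (!asce)
		return -ENOMEM;
	/* Hand the asce to the I/O request; it must never be attached to a cpu */
	rc = do_io_request(asce);
	base_asce_free(asce);
	return rc;
}
#endif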