/*
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);

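/*
 * Allocate and free a full CRST (region or segment) table. The table
 * spans 1 << ALLOC_ORDER pages and is handed out via page_to_phys(),
 * i.e. as the table's physical address.
 */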
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
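/*
 * Grow the address space to at least @limit by putting additional region
 * table levels on top of the current page table. The new top level table
 * becomes mm->pgd, the asce limit and bits are updated accordingly, and
 * update_mm() reloads the resulting ASCE for the calling task.
 */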
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}

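/*
 * Shrink the address space to @limit by removing region table levels from
 * the top of the page table, freeing each discarded level and reloading
 * the resulting ASCE via update_mm().
 */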
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

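/*
 * Remove the rmap entry that links a guest segment table entry to a parent
 * page table and invalidate the entry again, keeping the original parent
 * address in it. Returns 1 if an established mapping was removed and a TLB
 * flush is needed, 0 otherwise.
 */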
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

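/* Flush the TLB entries of a guest address space, by IDTE if available. */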
static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

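/*
 * Allocate an intermediate CRST table for the guest address space,
 * initialize it with @init and link it into the parent entry at @table,
 * unless another thread has filled the entry in the meantime.
 */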
static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        down_read(&gmap->mm->mmap_sem);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

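/**
 * gmap_fault - resolve a fault on a guest address
 * @address: address in the guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Walks the guest page table and, for a segment that is not yet connected,
 * links the guest segment table entry to the corresponding parent page
 * table. Returns the parent address space address for @address, or
 * -EFAULT/-ENOMEM on failure.
 */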
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;
        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                list_add(&rmap->list, &mp->mapper);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);

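/*
 * Called when a page table is removed from the parent mm: detach all guest
 * segment table entries that still reference it and flush the TLB if any
 * mapping was torn down.
 */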
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}

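/*
 * Allocate a 4K page table with pgstes: the lower half of the page holds
 * the page table entries, the upper half the pgstes. A struct gmap_pgtable
 * hangs off page->index to track the parent address and the gmap rmap list.
 */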
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

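/* Release a page table with pgstes and its gmap_pgtable descriptor. */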
static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

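/* Atomically toggle the given bits in *v and return the new value. */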
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

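/*
 * Really free a page table after the RCU grace period. The fragment bits
 * encoded by page_table_free_rcu() tell whether this is a pgste table
 * (FRAG_MASK) or which 1K/2K fragment of the 4K page is to be released.
 */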
static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

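/*
 * Defer freeing of a page table until after the TLB flush: the fragment
 * bit (or FRAG_MASK for pgste tables) is encoded into the unused low bits
 * of the table address and handed to tlb_remove_table().
 */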
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}

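/*
 * Called by the generic mmu_gather code to perform the real free: a
 * non-zero type in the low bits means a 1K/2K/pgste fragment, zero means
 * a full CRST table.
 */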
void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long) _table & PAGE_MASK);
        unsigned type = (unsigned long) _table & ~PAGE_MASK;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for the userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* we copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
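/*
 * Check whether a kernel page is mapped: LRA (load real address) sets a
 * non-zero condition code if the address cannot be translated.
 */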
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */