/*
 * Copyright IBM Corp. 2007,2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

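/*
 * ALLOC_ORDER is the allocation order for the 2048-entry region and segment
 * (crst) tables handed out by crst_table_alloc(): four pages (16KB) on
 * 64-bit, two pages (8KB) on 31-bit.  FRAG_MASK covers the bits used in
 * page->_mapcount to track which page table fragments of a 4KB page are in
 * use; see page_table_alloc() and page_table_free() below.
 */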
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
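/*
 * The 64-bit user address space can be grown and shrunk at run time: a 2GB
 * address space uses a segment table as the top level, a 4TB address space
 * a region-third table and an 8PB address space a region-second table.
 * crst_table_upgrade() and crst_table_downgrade() install the new top-level
 * table and keep asce_bits/asce_limit in the mm context in sync with it.
 */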
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        /* since we don't free the gmap table until gmap_free we can unlock */
        spin_unlock(&gmap->mm->page_table_lock);
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        spin_lock(&gmap->mm->page_table_lock);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;
        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                spin_lock(&mm->page_table_lock);
                list_add(&rmap->list, &mp->mapper);
                spin_unlock(&mm->page_table_lock);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_fault(address, gmap);
        up_read(&gmap->mm->mmap_sem);

        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
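/*
 * Illustrative sketch, not taken from this file: a hypervisor such as KVM
 * would typically own one gmap per virtual machine and drive this interface
 * roughly as follows (user_addr, guest_addr and size are made-up names, and
 * the exact call sites depend on the hypervisor code):
 *
 *	gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, user_addr, guest_addr, size);
 *	gmap_enable(gmap);		(before entering SIE on this cpu)
 *	gmap_fault(guest_addr, gmap);	(to resolve a guest segment fault)
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */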

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
        unsigned long *table, address, size;
        struct vm_area_struct *vma;
        struct gmap_pgtable *mp;
        struct page *page;

        down_read(&gmap->mm->mmap_sem);
        address = from;
        while (address < to) {
                /* Walk the gmap address space page table */
                table = gmap->table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                page = pfn_to_page(*table >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                vma = find_vma(gmap->mm, mp->vmaddr);
                size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
                zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
                               size, NULL);
                address = (address + PMD_SIZE) & PMD_MASK;
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}

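/*
 * Page tables for KVM guests are allocated as full 4KB pages: the lower half
 * holds the 256 pte entries, the upper half the per-pte guest status (pgste)
 * entries.  page->index points to the gmap_pgtable bookkeeping structure and
 * _mapcount is preset to FRAG_MASK so that such a page is never treated as a
 * partially used fragment page.
 */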
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_ctor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

/*
 * page table entry allocation/free routines.
 */
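/*
 * A 4KB page is carved into 2KB (64-bit) or 1KB (31-bit) page table
 * fragments.  The low bits of page->_mapcount record which fragments are in
 * use, one bit per fragment; the next four bits mark fragments whose RCU
 * grace period after page_table_free_rcu() has not expired yet.  A fragment
 * is only handed out again when both nibbles show it as idle, which is what
 * the mask | (mask >> 4) in page_table_alloc() checks.
 */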
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
        const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
        void *table = (void *)((unsigned long) _table & ~mask);
        unsigned type = (unsigned long) _table & mask;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* we copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */