/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER     1
#define FRAG_MASK       0x0f
#else
#define ALLOC_ORDER     2
#define FRAG_MASK       0x03
#endif

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}
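
/*
 * Illustrative note: a CRST (region or segment) table spans 1 << ALLOC_ORDER
 * pages, i.e. 2048 eight-byte entries (16KB) on 64-bit.  crst_table_alloc()
 * hands out the physical address of the table, which works here because the
 * kernel address space is identity mapped.  A hypothetical caller would pair
 * the helpers roughly like this (error handling shortened):
 *
 *      unsigned long *table = crst_table_alloc(mm);
 *
 *      if (!table)
 *              return -ENOMEM;
 *      crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
 *      ...
 *      crst_table_free(mm, table);
 */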

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        return 0;
}
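
/*
 * A note on the upgrade loop above: each pass adds exactly one translation
 * level, moving the address space limit from 2GB (1UL << 31, segment table as
 * top level) to 4TB (1UL << 42, region-third table) and then to 8PB
 * (1UL << 53, region-second table).  The new top-level table is allocated
 * without the lock, the limit is re-checked under page_table_lock, and if
 * another thread raced ahead the fresh table is freed again.  Callers such as
 * the arch mmap code are assumed to invoke this when a mapping request lies
 * above the current asce_limit.
 */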

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
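
/*
 * Illustrative usage sketch for the gmap API (hypothetical caller, e.g. a
 * KVM-like user of guest address spaces; names are placeholders):
 *
 *      struct gmap *gmap;
 *
 *      gmap = gmap_alloc(current->mm);
 *      if (!gmap)
 *              return -ENOMEM;
 *      gmap_map_segment(gmap, userspace_addr, guest_addr, size);
 *      ...
 *      gmap_free(gmap);
 *
 * The top level is always a region-first table here, so guest addresses may
 * span the full 64-bit range supported by the hardware.
 */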

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
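
/*
 * gmap_enable()/gmap_disable() only record the currently active guest
 * address space in the lowcore; the actual switch to the gmap ASCE is
 * assumed to happen on the entry/exit path of the SIE instruction.  A
 * hypothetical caller would bracket guest execution like this:
 *
 *      gmap_enable(gmap);
 *      ... run the guest (SIE); host faults are resolved via gmap_fault() ...
 *      gmap_disable(gmap);
 */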
243
Carsten Ottea9162f232011-10-30 15:17:00 +0100244/*
245 * gmap_alloc_table is assumed to be called with mmap_sem held
246 */
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200247static int gmap_alloc_table(struct gmap *gmap,
248 unsigned long *table, unsigned long init)
249{
250 struct page *page;
251 unsigned long *new;
252
Christian Borntraegerc86cce22011-12-27 11:25:47 +0100253 /* since we dont free the gmap table until gmap_free we can unlock */
254 spin_unlock(&gmap->mm->page_table_lock);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200255 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
Christian Borntraegerc86cce22011-12-27 11:25:47 +0100256 spin_lock(&gmap->mm->page_table_lock);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200257 if (!page)
258 return -ENOMEM;
259 new = (unsigned long *) page_to_phys(page);
260 crst_table_init(new, init);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200261 if (*table & _REGION_ENTRY_INV) {
262 list_add(&page->lru, &gmap->crst_list);
263 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
264 (*table & _REGION_ENTRY_TYPE_MASK);
265 } else
266 __free_pages(page, ALLOC_ORDER);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200267 return 0;
268}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
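
/*
 * Illustrative sketch: both helpers operate on whole 1MB segments, so "from",
 * "to" and "len" must be PMD_SIZE aligned.  A hypothetical caller mirroring a
 * userspace memory slot into the guest space could do (names are purely
 * illustrative):
 *
 *      if (gmap_map_segment(gmap, userspace_addr, guest_addr, size))
 *              return -EINVAL;
 *      ...
 *      gmap_unmap_segment(gmap, guest_addr, size);
 *
 * Mapping only installs invalid segment entries that remember the parent
 * address; the real page tables are hooked in lazily by __gmap_fault() when
 * the guest first touches the segment.
 */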

static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
        unsigned long *table;

        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return ERR_PTR(-EFAULT);
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return ERR_PTR(-EFAULT);
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return ERR_PTR(-EFAULT);
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);
        return table;
}
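
/*
 * The shifts above mirror the hardware translation layout: each table level
 * resolves 11 bits of the guest address (2048 entries per table, hence the
 * 0x7ff masks), with the region-first index at ">> 53", region-second at
 * ">> 42", region-third at ">> 31" and the 1MB segment index at ">> 20".
 * Worked example (address chosen purely for illustration): for guest address
 * 0x0000000080100000 the indices are 0, 0, 1 and 0x001, so the walk ends on
 * the second entry of the segment table referenced by the second region-third
 * entry.
 */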

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
        unsigned long *segment_ptr, vmaddr, segment;
        struct gmap_pgtable *mp;
        struct page *page;

        current->thread.gmap_addr = address;
        segment_ptr = gmap_table_walk(address, gmap);
        if (IS_ERR(segment_ptr))
                return PTR_ERR(segment_ptr);
        /* Convert the gmap address to an mm address. */
        segment = *segment_ptr;
        if (!(segment & _SEGMENT_ENTRY_INV)) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_translate(address, gmap);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
456
Carsten Otte499069e2011-10-30 15:17:02 +0100457/*
458 * this function is assumed to be called with mmap_sem held
459 */
460unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200461{
Heiko Carstensc5034942012-09-10 16:14:33 +0200462 unsigned long *segment_ptr, vmaddr, segment;
463 struct vm_area_struct *vma;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200464 struct gmap_pgtable *mp;
465 struct gmap_rmap *rmap;
Heiko Carstensc5034942012-09-10 16:14:33 +0200466 struct mm_struct *mm;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200467 struct page *page;
468 pgd_t *pgd;
469 pud_t *pud;
470 pmd_t *pmd;
471
472 current->thread.gmap_addr = address;
Heiko Carstensc5034942012-09-10 16:14:33 +0200473 segment_ptr = gmap_table_walk(address, gmap);
474 if (IS_ERR(segment_ptr))
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200475 return -EFAULT;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200476 /* Convert the gmap address to an mm address. */
Heiko Carstensc5034942012-09-10 16:14:33 +0200477 segment = *segment_ptr;
478 if (!(segment & _SEGMENT_ENTRY_INV)) {
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200479 page = pfn_to_page(segment >> PAGE_SHIFT);
480 mp = (struct gmap_pgtable *) page->index;
481 return mp->vmaddr | (address & ~PMD_MASK);
482 } else if (segment & _SEGMENT_ENTRY_RO) {
Heiko Carstensc5034942012-09-10 16:14:33 +0200483 mm = gmap->mm;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200484 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
485 vma = find_vma(mm, vmaddr);
486 if (!vma || vma->vm_start > vmaddr)
487 return -EFAULT;
488
489 /* Walk the parent mm page table */
490 pgd = pgd_offset(mm, vmaddr);
491 pud = pud_alloc(mm, pgd, vmaddr);
492 if (!pud)
493 return -ENOMEM;
494 pmd = pmd_alloc(mm, pud, vmaddr);
495 if (!pmd)
496 return -ENOMEM;
497 if (!pmd_present(*pmd) &&
498 __pte_alloc(mm, vma, pmd, vmaddr))
499 return -ENOMEM;
500 /* pmd now points to a valid segment table entry. */
501 rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
502 if (!rmap)
503 return -ENOMEM;
504 /* Link gmap segment table entry location to page table. */
505 page = pmd_page(*pmd);
506 mp = (struct gmap_pgtable *) page->index;
Heiko Carstensc5034942012-09-10 16:14:33 +0200507 rmap->entry = segment_ptr;
Carsten Ottecc772452011-10-30 15:17:01 +0100508 spin_lock(&mm->page_table_lock);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200509 list_add(&rmap->list, &mp->mapper);
Carsten Ottecc772452011-10-30 15:17:01 +0100510 spin_unlock(&mm->page_table_lock);
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200511 /* Set gmap segment table entry to page table. */
Heiko Carstensc5034942012-09-10 16:14:33 +0200512 *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200513 return vmaddr | (address & ~PMD_MASK);
514 }
515 return -EFAULT;
Carsten Otte499069e2011-10-30 15:17:02 +0100516}
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200517
Carsten Otte499069e2011-10-30 15:17:02 +0100518unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
519{
520 unsigned long rc;
521
522 down_read(&gmap->mm->mmap_sem);
523 rc = __gmap_fault(address, gmap);
524 up_read(&gmap->mm->mmap_sem);
525
526 return rc;
Martin Schwidefskye5992f22011-07-24 10:48:20 +0200527}
528EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
        unsigned long *table, address, size;
        struct vm_area_struct *vma;
        struct gmap_pgtable *mp;
        struct page *page;

        down_read(&gmap->mm->mmap_sem);
        address = from;
        while (address < to) {
                /* Walk the gmap address space page table */
                table = gmap->table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                page = pfn_to_page(*table >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                vma = find_vma(gmap->mm, mp->vmaddr);
                size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
                zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
                               size, NULL);
                address = (address + PMD_SIZE) & PMD_MASK;
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
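
/*
 * Illustrative note: gmap_discard() zaps the parent (userspace) pages that
 * back a guest address range, which is the behaviour a hypothetical handler
 * for a guest "release pages" hint (e.g. diagnose 0x10) would want:
 *
 *      gmap_discard(start, end, gmap);
 *
 * Unbacked ranges are simply skipped, one 1MB segment at a time.
 */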

void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                        unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}
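
/*
 * atomic_xor_bits() is a small compare-and-swap loop: it rereads the counter
 * until the XOR can be applied without interference and returns the new
 * value, so the same helper both sets and clears fragment bits.  Worked
 * example (illustrative values): starting from a fragment bitmap of 0x01,
 * atomic_xor_bits(&m, 0x02) returns 0x03, and a later
 * atomic_xor_bits(&m, 0x01) returns 0x02.
 */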

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        unsigned long *uninitialized_var(table);
        struct page *uninitialized_var(page);
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}
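
/*
 * A short walk-through of the fragment bookkeeping above (64-bit case,
 * FRAG_MASK = 0x03, two 2K page tables per 4K page): the low nibble of
 * page->_mapcount tracks which fragments are allocated, the next nibble
 * marks fragments still pending RCU removal, so "mask | (mask >> 4)" treats
 * a fragment queued for freeing as busy.  Example (illustrative values):
 * _mapcount = 0x01 means only the first 2K half is in use, so the loop picks
 * bit 0x02 and hands out table + PTRS_PER_PTE; _mapcount = 0x21 means the
 * second half is still waiting for its grace period, so a completely new
 * page is allocated instead.
 */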

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
        const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
        void *table = (void *)((unsigned long) _table & ~mask);
        unsigned type = (unsigned long) _table & mask;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                __tlb_flush_mm(tlb->mm);
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}
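
/*
 * Summary of the RCU page table freeing scheme implemented above:
 * tlb_remove_table() collects table pointers in a per-gather batch page,
 * tlb_table_flush() flushes the TLB and hands the whole batch to
 * call_rcu_sched(), so lockless walkers that run with IRQs disabled cannot
 * see a table being reused under them.  The low bits of each pointer encode
 * the type for __tlb_remove_table(): 0 means a full CRST table, FRAG_MASK a
 * pgste page table, and "bit << 4" a 2K fragment.  If the batch page cannot
 * be allocated, tlb_remove_table_one() falls back to an IPI broadcast before
 * freeing the table immediately.
 */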

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void thp_split_vma(struct vm_area_struct *vma)
{
        unsigned long addr;
        struct page *page;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
                page = follow_page(vma, addr, FOLL_SPLIT);
        }
}

void thp_split_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                thp_split_vma(vma);
                vma->vm_flags &= ~VM_HUGEPAGE;
                vma->vm_flags |= VM_NOHUGEPAGE;
                vma = vma->vm_next;
        }
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * switch on pgstes for the userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie */
        if (s390_user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* we copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        /* make sure that both mms have a correct rss state */
        sync_mm_rss(tsk->mm);
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
        mm->def_flags |= VM_NOHUGEPAGE;
#endif

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* ok, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
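
/*
 * Usage note (illustrative): s390_enable_sie() is meant to be called once by
 * the userspace process that wants to run guests, before any vcpu exists,
 * e.g. from a KVM_CREATE_VM-style path:
 *
 *      rc = s390_enable_sie();
 *      if (rc)
 *              return rc;
 *
 * It only succeeds while the process still owns its mm exclusively (single
 * threaded, no outstanding AIO), because the whole mm is duplicated with
 * pgste-enabled page tables and then swapped in place of the original one.
 */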

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        /*
         * No need to flush the TLB; on s390 reference bits are kept in the
         * storage key and never in the TLB.
         */
        return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (pmd_same(*pmdp, entry))
                return 0;
        pmdp_invalidate(vma, address, pmdp);
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
                              (unsigned long *) pmdp)) {
                /* need to serialize against gup-fast (IRQ disabled) */
                smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
        }
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) mm->pmd_huge_pte);
        mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
        struct list_head *lh;
        pgtable_t pgtable;
        pte_t *ptep;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
        ptep++;
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */