/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
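/*
 * ALLOC_ORDER is the allocation order of a crst (region or segment) table.
 * FRAG_MASK has one bit per page table fragment that fits into a 4K page:
 * four 1K page tables on 31 bit, two 2K page tables on 64 bit. The bits are
 * tracked in page->_mapcount by page_table_alloc()/page_table_free() below.
 */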

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
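
/*
 * Rough sketch of how the gmap interface is meant to be used by a
 * hypervisor (KVM being the in-tree user); the actual call sites live
 * outside of this file, and "from", "to", "len" and "guest_addr" are
 * placeholder variables:
 *
 *	gmap = gmap_alloc(mm);
 *	rc = gmap_map_segment(gmap, from, to, len);
 *	gmap_enable(gmap);
 *	...
 *	vmaddr = gmap_fault(guest_addr, gmap);
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */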

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

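/*
 * Walk the gmap region tables down to the segment table entry for a guest
 * address. The shifts 53/42/31/20 select the region-first, region-second,
 * region-third and segment table index; each table has 2048 entries, hence
 * the "& 0x7ff" masks.
 */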
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
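
/*
 * Hypothetical example of how the invalidation notifier interface fits
 * together (the "my_*" names are placeholders, not symbols defined here):
 *
 *	static void my_invalidate(struct gmap *gmap, unsigned long address)
 *	{
 *		... react to the invalidation of the guest page at address ...
 *	}
 *	static struct gmap_notifier my_nb = { .notifier_call = my_invalidate };
 *
 *	gmap_register_ipte_notifier(&my_nb);
 *	rc = gmap_ipte_notify(gmap, start, len);
 *
 * gmap_do_ipte_notify() below then invokes the callback of each registered
 * notifier when a pte marked by gmap_ipte_notify() is invalidated.
 */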

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(current->mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits;
		unsigned char skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		/* Set storage key ACC and FP */
		page_set_storage_key(address,
				     (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
				     !nq);

		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
		/* Transfer skey changed & referenced bit to kvm user bits */
		pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

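/*
 * Atomically toggle the given bits in *v and return the new value. Used to
 * flip the fragment bits in page->_mapcount when 1K/2K page table fragments
 * are allocated or released.
 */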
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
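/*
 * A pte table occupies only a fraction of a 4K page, so full pages are
 * carved into fragments: pages with free fragments sit on
 * mm->context.pgtable_list, and the low bits of page->_mapcount record
 * which fragments are in use (see FRAG_MASK above). page_table_alloc()
 * hands out the next free fragment or starts a new page, page_table_free()
 * returns a fragment and frees the page once all fragments are unused.
 */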
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

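/*
 * For the deferred variant the fragment information is encoded in the low
 * bits of the table address handed to tlb_remove_table(): zero for a full
 * crst table, the fragment bit shifted into the upper nibble for a pte
 * table fragment, or FRAG_MASK for a pgste page table. __tlb_remove_table()
 * decodes this again after the grace period (or the IPI fallback) and frees
 * the table accordingly.
 */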
void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		__tlb_flush_mm(tlb->mm);
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;
	struct page *page;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		page = follow_page(vma, addr, FOLL_SPLIT);
	}
}

void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
		vma = vma->vm_next;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If no, we cannot do sie */
	if (s390_user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* we copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	/* make sure that both mms have a correct rss state */
	sync_mm_rss(tsk->mm);
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	mm->def_flags |= VM_NOHUGEPAGE;
#endif

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

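/*
 * pgtable_trans_huge_deposit/withdraw keep a per-mm list of pre-allocated
 * pte tables in mm->pmd_huge_pte for the transparent huge page code. The
 * list_heads live in the otherwise unused page tables themselves; a
 * withdrawn table gets its two clobbered entries reset to _PAGE_INVALID
 * before it is handed back.
 */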
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */