/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		update_mm(mm, current);
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}
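
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * in the arch mmap path would typically upgrade the address space only
 * when a requested mapping end exceeds the current limit, e.g.:
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    crst_table_upgrade(mm, 1UL << 53))
 *		return -ENOMEM;
 *
 * "addr" and "len" are placeholder names for the sketch; the upgrade is
 * a no-op for limits that have already been reached.
 */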

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm)
		__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
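
/*
 * Example (illustrative sketch, not part of the original file): a KVM-like
 * user pairs gmap_alloc() with gmap_free() once the guest goes away:
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	if (!gmap)
 *		return -ENOMEM;
 *	...	(map segments, run the guest)
 *	gmap_free(gmap);
 *
 * A sketch only; error handling and locking follow the caller's context.
 */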

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
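
/*
 * Example (illustrative sketch, not part of the original file):
 * gmap_enable()/gmap_disable() bracket the code that runs with the guest
 * address space on the current CPU, typically around the SIE loop:
 *
 *	gmap_enable(gmap);
 *	...	(enter SIE, handle guest exits and faults)
 *	gmap_disable(gmap);
 *
 * The per-CPU pairing shown here is an assumption of the sketch.
 */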

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
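
/*
 * Example (illustrative sketch, not part of the original file): backing one
 * guest segment at guest address 0 with a user mapping at "userptr":
 *
 *	if (gmap_map_segment(gmap, userptr, 0, PMD_SIZE))
 *		return -EFAULT;
 *	...
 *	gmap_unmap_segment(gmap, 0, PMD_SIZE);
 *
 * "userptr" is a placeholder; both addresses and the length must be
 * multiples of PMD_SIZE or -EINVAL is returned.
 */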

static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
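
/*
 * Example (illustrative sketch, not part of the original file): the result
 * is either a user space address or a negative error encoded in the
 * unsigned long:
 *
 *	unsigned long uaddr = gmap_translate(gaddr, gmap);
 *	if (IS_ERR_VALUE(uaddr))
 *		return (long) uaddr;	(no mapping: -EFAULT)
 *
 * "gaddr" is a placeholder for a guest address.
 */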

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
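
/*
 * Example (illustrative sketch, not part of the original file): a guest
 * fault handler converts the guest address, then resolves the user space
 * fault, mirroring what gmap_ipte_notify() below does internally:
 *
 *	unsigned long uaddr = gmap_fault(gaddr, gmap);
 *	if (IS_ERR_VALUE(uaddr))
 *		return (long) uaddr;
 *	if (fixup_user_fault(current, gmap->mm, uaddr, FAULT_FLAG_WRITE))
 *		return -EFAULT;
 *
 * "gaddr" is a placeholder; the write flag depends on the access type.
 */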

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
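
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * reacting to a guest "page unused" hint could drop the backing for the
 * enclosing guest segment:
 *
 *	gmap_discard(gaddr & PMD_MASK, (gaddr & PMD_MASK) + PMD_SIZE, gmap);
 *
 * "gaddr" is a placeholder guest address; the range is processed in
 * PMD_SIZE steps and unmapped segments are silently skipped.
 */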

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
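
/*
 * Example (illustrative sketch, not part of the original file): a consumer
 * registers a callback once and then arms individual guest ranges:
 *
 *	static void my_pte_notifier(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		(react to the invalidation, e.g. kick a vcpu)
 *	}
 *	static struct gmap_notifier nb = { .notifier_call = my_pte_notifier };
 *
 *	gmap_register_ipte_notifier(&nb);
 *	rc = gmap_ipte_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE);
 *
 * "my_pte_notifier" and "gaddr" are placeholder names for the sketch.
 */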

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}

static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	if (!pgtable_page_ctor(page)) {
		kfree(mp);
		__free_page(page);
		return NULL;
	}
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(current->mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
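
/*
 * Example (illustrative sketch, not part of the original file): setting the
 * guest storage key for one page without the nonquiescing variant:
 *
 *	rc = set_guest_storage_key(current->mm, uaddr, key, false);
 *	if (rc)
 *		return rc;
 *
 * "uaddr" and "key" are placeholders; "key" carries ACC/FP plus the guest
 * referenced and changed bits in storage-key format.
 */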

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
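
/*
 * Worked example of the fragment bookkeeping (informal, derived from the
 * code above): on 64 bit, FRAG_MASK is 0x03 and a 4K page holds two 2K
 * page tables. A fresh page starts with _mapcount == 1 (low nibble 01:
 * fragment 0 in use). Allocating the second fragment xors in bit 2,
 * giving 03; mask | (mask >> 4) then equals FRAG_MASK and the page is
 * removed from pgtable_list as fully used. The upper nibble tracks
 * fragments that are still pending RCU free, see page_table_free_rcu().
 */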

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
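
/*
 * Example (illustrative sketch, not part of the original file): a hypervisor
 * module enables pgstes once per process before creating a guest:
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *	gmap = gmap_alloc(current->mm);
 *
 * The call is a no-op if the mm already has pgstes.
 */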

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/*
	 * No need to flush the TLB; on s390 the reference bits are in the
	 * storage key and never in the TLB.
	 */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
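
/*
 * Example (illustrative sketch, not part of the original file): the generic
 * THP code pairs these helpers under the page table lock when collapsing
 * and splitting huge pmds:
 *
 *	spin_lock(&mm->page_table_lock);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	spin_unlock(&mm->page_table_lock);
 *
 * Deposited tables form a FIFO per pmd; the sketch elides the surrounding
 * THP logic.
 */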

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */