// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016, 2018
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      David Hildenbrand <david@redhat.com>
 *	      Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	refcount_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
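
/*
 * Note on the limit handling in gmap_alloc() (illustrative arithmetic):
 * the topmost table type is picked so that it just covers the requested
 * limit. A segment table reaches 2^31 bytes, and every higher table
 * type (region-3, region-2, region-1) multiplies the reach by 2^11.
 * For example, a 16GB guest (limit 2^34) is above the segment table
 * reach but below _REGION2_SIZE, so a region-3 table is allocated and
 * the limit is rounded up to _REGION2_SIZE - 1.
 */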

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
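
/*
 * Illustrative usage sketch (not part of this file): a hypervisor such
 * as KVM creates one gmap per guest and tears it down with
 * gmap_remove() once the last user is gone:
 *
 *	struct gmap *g = gmap_create(current->mm, 1UL << 42);
 *	if (!g)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(g);
 *
 * The 4TB limit (1UL << 42) is a made-up example value.
 */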

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	refcount_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->ref_count))
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
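
/*
 * Illustrative sketch of the reference counting contract (not part of
 * this file): code that hands the gmap to another user takes an extra
 * reference so that the final gmap_put() is what triggers gmap_free():
 *
 *	struct gmap *g2 = gmap_get(g);	// ref_count 1 -> 2
 *	...
 *	gmap_put(g2);			// ref_count 2 -> 1, not freed yet
 */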

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the pre-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap, or NULL if none
 * is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
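
/*
 * Worked example for __gmap_segment_gaddr() (illustrative): with 1MB
 * segments (PMD_SIZE) and PTRS_PER_PMD == 2048 entries per segment
 * table, an @entry sitting in slot 5 of its table yields
 * offset = 5 * PMD_SIZE, while page->index holds the guest address
 * that slot 0 maps (stored by gmap_alloc_table()), so the result is
 * the table's base guest address plus 5MB.
 */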

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
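
/*
 * Illustrative usage (not part of this file): map 2MB of the host
 * process starting at address 0x20000000 to guest address 0x10000000;
 * all three values must be 1MB (PMD_SIZE) aligned:
 *
 *	rc = gmap_map_segment(g, 0x20000000UL, 0x10000000UL, 0x200000UL);
 *
 * The addresses are made-up example values.
 */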

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
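
/*
 * Illustrative sketch (not part of this file): errors are encoded in
 * the returned address, so callers test with IS_ERR_VALUE():
 *
 *	unsigned long vmaddr = gmap_translate(g, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return vmaddr;	// -EFAULT, no mapping
 */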

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					| _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					_SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during fault-in,
	 * redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
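
/*
 * Illustrative sketch (not part of this file): a guest fault handler
 * that needs @gaddr backed and linked could do:
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc)
 *		return rc;	// e.g. inject an exception into the guest
 *
 * FAULT_FLAG_WRITE requests a writable mapping; pass 0 for read access.
 */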

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
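
/*
 * Illustrative sketch (not part of this file): a consumer registers a
 * callback that fires when a protected guest range is invalidated.
 * my_notifier_call and my_nb are made-up example names:
 *
 *	static void my_notifier_call(struct gmap *gmap,
 *				     unsigned long start, unsigned long end)
 *	{
 *		// react to the invalidated guest range [start, end]
 *	}
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *	gmap_register_pte_notifier(&my_nb);
 */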

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level:
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;

	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
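
/*
 * Note on the range check in gmap_table_walk() (illustrative): the
 * table type lives in bits of the ASCE such that asce_type >> 2 is 0
 * for a segment table, 1 for region-3, 2 for region-2 and 3 for
 * region-1. A segment table reaches 2^31 bytes and each higher type
 * multiplies that by 2^11, hence the "31 + (asce_type >> 2) * 11"
 * shift rejecting guest addresses beyond the topmost table's reach.
 */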

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	/* without huge pages, there is no need to take the table lock */
	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_sem in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_sem in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
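
/*
 * Illustrative usage (not part of this file): write-protect a single
 * guest page and get notified on the next write access to it:
 *
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 *
 * A pte notifier registered via gmap_register_pte_notifier() then
 * fires once the protection is broken again.
 */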

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
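
/*
 * The .insn above encodes IDTE (invalidate DAT table entry, opcode
 * 0xb98e, RRF format) directly, presumably so the file also builds
 * with assemblers that do not know the mnemonic; %0 carries the table
 * origin plus type, %1 the address selecting the entry to invalidate.
 */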

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
1343
1344/**
1345 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
1346 * @sg: pointer to the shadow guest address space structure
1347 * @raddr: rmap address in the shadow guest address space
1348 * @sgt: pointer to the start of a shadow segment table
1349 *
1350 * Called with the sg->guest_table_lock held
1351 */
1352static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
1353 unsigned long *sgt)
1354{
Heiko Carstens2be1da82017-11-14 14:50:08 +01001355 unsigned long *pgt;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001356 struct page *page;
1357 int i;
1358
1359 BUG_ON(!gmap_is_shadow(sg));
Heiko Carstensf1c11742017-07-05 07:37:27 +02001360 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
David Hildenbrand998f6372016-03-08 12:23:38 +01001361 if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001362 continue;
1363 pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
1364 sgt[i] = _SEGMENT_ENTRY_EMPTY;
1365 __gmap_unshadow_pgt(sg, raddr, pgt);
1366 /* Free page table */
1367 page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1368 list_del(&page->lru);
1369 page_table_free_pgste(page);
1370 }
1371}
1372
1373/**
1374 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
1375 * @sg: pointer to the shadow guest address space structure
1376 * @raddr: rmap address in the shadow guest address space
1377 *
1378 * Called with the sg->guest_table_lock held
1379 */
1380static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
1381{
1382 unsigned long r3o, *r3e, *sgt;
1383 struct page *page;
1384
1385 BUG_ON(!gmap_is_shadow(sg));
1386 r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
David Hildenbrand998f6372016-03-08 12:23:38 +01001387 if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001388 return;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001389 gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
1390 r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001391 gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
1392 sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
1393 *r3e = _REGION3_ENTRY_EMPTY;
1394 __gmap_unshadow_sgt(sg, raddr, sgt);
1395 /* Free segment table */
1396 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1397 list_del(&page->lru);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001398 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001399}
1400
1401/**
1402 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
1403 * @sg: pointer to the shadow guest address space structure
1404 * @raddr: address in the shadow guest address space
1405 * @r3t: pointer to the start of a shadow region-3 table
1406 *
1407 * Called with the sg->guest_table_lock held
1408 */
1409static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
1410 unsigned long *r3t)
1411{
Heiko Carstens2be1da82017-11-14 14:50:08 +01001412 unsigned long *sgt;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001413 struct page *page;
1414 int i;
1415
1416 BUG_ON(!gmap_is_shadow(sg));
Heiko Carstensf1c11742017-07-05 07:37:27 +02001417 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
David Hildenbrand998f6372016-03-08 12:23:38 +01001418 if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001419 continue;
1420 sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
1421 r3t[i] = _REGION3_ENTRY_EMPTY;
1422 __gmap_unshadow_sgt(sg, raddr, sgt);
1423 /* Free segment table */
1424 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1425 list_del(&page->lru);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001426 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001427 }
1428}
1429
1430/**
1431 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1432 * @sg: pointer to the shadow guest address space structure
1433 * @raddr: rmap address in the shadow guest address space
1434 *
1435 * Called with the sg->guest_table_lock held
1436 */
1437static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1438{
1439 unsigned long r2o, *r2e, *r3t;
1440 struct page *page;
1441
1442 BUG_ON(!gmap_is_shadow(sg));
1443 r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
David Hildenbrand998f6372016-03-08 12:23:38 +01001444 if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001445 return;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001446 gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
1447 r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001448 gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
1449 r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
1450 *r2e = _REGION2_ENTRY_EMPTY;
1451 __gmap_unshadow_r3t(sg, raddr, r3t);
1452 /* Free region 3 table */
1453 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1454 list_del(&page->lru);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001455 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001456}
1457
1458/**
1459 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1460 * @sg: pointer to the shadow guest address space structure
1461 * @raddr: rmap address in the shadow guest address space
1462 * @r2t: pointer to the start of a shadow region-2 table
1463 *
1464 * Called with the sg->guest_table_lock held
1465 */
1466static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1467 unsigned long *r2t)
1468{
Heiko Carstens2be1da82017-11-14 14:50:08 +01001469 unsigned long *r3t;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001470 struct page *page;
1471 int i;
1472
1473 BUG_ON(!gmap_is_shadow(sg));
Heiko Carstensf1c11742017-07-05 07:37:27 +02001474 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
David Hildenbrand998f6372016-03-08 12:23:38 +01001475 if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001476 continue;
1477 r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
1478 r2t[i] = _REGION2_ENTRY_EMPTY;
1479 __gmap_unshadow_r3t(sg, raddr, r3t);
1480 /* Free region 3 table */
1481 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1482 list_del(&page->lru);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001483 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001484 }
1485}
1486
1487/**
1488 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1489 * @sg: pointer to the shadow guest address space structure
1490 * @raddr: rmap address in the shadow guest address space
1491 *
1492 * Called with the sg->guest_table_lock held
1493 */
1494static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1495{
1496 unsigned long r1o, *r1e, *r2t;
1497 struct page *page;
1498
1499 BUG_ON(!gmap_is_shadow(sg));
1500 r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
David Hildenbrand998f6372016-03-08 12:23:38 +01001501 if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001502 return;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001503 gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
1504 r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001505 gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
1506 r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
1507 *r1e = _REGION1_ENTRY_EMPTY;
1508 __gmap_unshadow_r2t(sg, raddr, r2t);
1509 /* Free region 2 table */
1510 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1511 list_del(&page->lru);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001512 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001513}
1514
1515/**
1516 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1517 * @sg: pointer to the shadow guest address space structure
1518 * @raddr: rmap address in the shadow guest address space
1519 * @r1t: pointer to the start of a shadow region-1 table
1520 *
1521 * Called with the sg->guest_table_lock held
1522 */
1523static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1524 unsigned long *r1t)
1525{
1526 unsigned long asce, *r2t;
1527 struct page *page;
1528 int i;
1529
1530 BUG_ON(!gmap_is_shadow(sg));
1531 asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001532 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
David Hildenbrand998f6372016-03-08 12:23:38 +01001533 if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001534 continue;
1535 r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
1536 __gmap_unshadow_r2t(sg, raddr, r2t);
1537 /* Clear entry and flush translation r1t -> r2t */
1538 gmap_idte_one(asce, raddr);
1539 r1t[i] = _REGION1_ENTRY_EMPTY;
1540 /* Free region 2 table */
1541 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1542 list_del(&page->lru);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001543 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001544 }
1545}
1546
1547/**
1548 * gmap_unshadow - remove a shadow page table completely
1549 * @sg: pointer to the shadow guest address space structure
1550 *
1551 * Called with sg->guest_table_lock held
1552 */
1553static void gmap_unshadow(struct gmap *sg)
1554{
1555 unsigned long *table;
1556
1557 BUG_ON(!gmap_is_shadow(sg));
1558 if (sg->removed)
1559 return;
1560 sg->removed = 1;
1561 gmap_call_notifier(sg, 0, -1UL);
David Hildenbrandeea36782016-04-15 12:45:45 +02001562 gmap_flush_tlb(sg);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001563 table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
1564 switch (sg->asce & _ASCE_TYPE_MASK) {
1565 case _ASCE_TYPE_REGION1:
1566 __gmap_unshadow_r1t(sg, 0, table);
1567 break;
1568 case _ASCE_TYPE_REGION2:
1569 __gmap_unshadow_r2t(sg, 0, table);
1570 break;
1571 case _ASCE_TYPE_REGION3:
1572 __gmap_unshadow_r3t(sg, 0, table);
1573 break;
1574 case _ASCE_TYPE_SEGMENT:
1575 __gmap_unshadow_sgt(sg, 0, table);
1576 break;
1577 }
1578}
1579
1580/**
1581 * gmap_find_shadow - find a specific asce in the list of shadow tables
1582 * @parent: pointer to the parent gmap
1583 * @asce: ASCE for which the shadow table is created
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001584 * @edat_level: edat level to be used for the shadow translation
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001585 *
1586 * Returns the pointer to a gmap if a shadow table with the given asce is
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001587 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1588 * otherwise NULL
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001589 */
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001590static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1591 int edat_level)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001592{
1593 struct gmap *sg;
1594
1595 list_for_each_entry(sg, &parent->children, list) {
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001596 if (sg->orig_asce != asce || sg->edat_level != edat_level ||
1597 sg->removed)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001598 continue;
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001599 if (!sg->initialized)
1600 return ERR_PTR(-EAGAIN);
Chuhong Yuan40e90652019-08-08 15:18:26 +08001601 refcount_inc(&sg->ref_count);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001602 return sg;
1603 }
1604 return NULL;
1605}
1606
1607/**
David Hildenbrand5b6c9632016-05-27 18:57:33 +02001608 * gmap_shadow_valid - check if a shadow guest address space matches the
1609 * given properties and is still valid
1610 * @sg: pointer to the shadow guest address space structure
1611 * @asce: ASCE for which the shadow table is requested
1612 * @edat_level: edat level to be used for the shadow translation
1613 *
1614 * Returns 1 if the gmap shadow is still valid and matches the given
1615 * properties; the caller can continue using it. Returns 0 otherwise; the
1616 * caller has to request a new shadow gmap in this case.
1617 *
1618 */
1619int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1620{
1621 if (sg->removed)
1622 return 0;
1623 return sg->orig_asce == asce && sg->edat_level == edat_level;
1624}
1625EXPORT_SYMBOL_GPL(gmap_shadow_valid);
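
/*
 * Usage sketch (hypothetical caller, variable names assumed): reuse a
 * cached shadow gmap only while it still matches the guest's asce and
 * edat level; otherwise ask gmap_shadow() below for a fresh one:
 *
 *	if (cached && gmap_shadow_valid(cached, asce, edat_level))
 *		sg = cached;
 *	else
 *		sg = gmap_shadow(parent, asce, edat_level);
 */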
1626
1627/**
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001628 * gmap_shadow - create/find a shadow guest address space
1629 * @parent: pointer to the parent gmap
1630 * @asce: ASCE for which the shadow table is created
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001631 * @edat_level: edat level to be used for the shadow translation
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001632 *
1633 * The pages of the top-level page table referenced by the asce parameter
1634 * will be set to read-only and marked in the PGSTEs of the kvm process.
1635 * The shadow table will be removed automatically on any change to the
1636 * PTE mapping for the source table.
1637 *
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001638 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1639 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1640 * parent gmap table could not be protected.
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001641 */
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001642struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1643 int edat_level)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001644{
1645 struct gmap *sg, *new;
1646 unsigned long limit;
1647 int rc;
1648
Janosch Franka9e00d82018-07-13 11:28:37 +01001649 BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001650 BUG_ON(gmap_is_shadow(parent));
1651 spin_lock(&parent->shadow_lock);
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001652 sg = gmap_find_shadow(parent, asce, edat_level);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001653 spin_unlock(&parent->shadow_lock);
1654 if (sg)
1655 return sg;
1656 /* Create a new shadow gmap */
1657 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
David Hildenbrand3218f702016-04-18 16:22:24 +02001658 if (asce & _ASCE_REAL_SPACE)
1659 limit = -1UL;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001660 new = gmap_alloc(limit);
1661 if (!new)
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001662 return ERR_PTR(-ENOMEM);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001663 new->mm = parent->mm;
1664 new->parent = gmap_get(parent);
1665 new->orig_asce = asce;
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001666 new->edat_level = edat_level;
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001667 new->initialized = false;
1668 spin_lock(&parent->shadow_lock);
1669 /* Recheck if another CPU created the same shadow */
David Hildenbrand5b062bd2016-03-08 12:17:40 +01001670 sg = gmap_find_shadow(parent, asce, edat_level);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001671 if (sg) {
1672 spin_unlock(&parent->shadow_lock);
1673 gmap_free(new);
1674 return sg;
1675 }
David Hildenbrand717c0552016-05-02 12:10:17 +02001676 if (asce & _ASCE_REAL_SPACE) {
1677 /* only allow one real-space gmap shadow */
1678 list_for_each_entry(sg, &parent->children, list) {
1679 if (sg->orig_asce & _ASCE_REAL_SPACE) {
1680 spin_lock(&sg->guest_table_lock);
1681 gmap_unshadow(sg);
1682 spin_unlock(&sg->guest_table_lock);
1683 list_del(&sg->list);
1684 gmap_put(sg);
1685 break;
1686 }
1687 }
1688 }
Chuhong Yuan40e90652019-08-08 15:18:26 +08001689 refcount_set(&new->ref_count, 2);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001690 list_add(&new->list, &parent->children);
David Hildenbrand3218f702016-04-18 16:22:24 +02001691 if (asce & _ASCE_REAL_SPACE) {
1692 /* nothing to protect, return right away */
1693 new->initialized = true;
1694 spin_unlock(&parent->shadow_lock);
1695 return new;
1696 }
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001697 spin_unlock(&parent->shadow_lock);
1698 /* protect after insertion, so it will get properly invalidated */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001699 down_read(&parent->mm->mmap_sem);
1700 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
Heiko Carstensf1c11742017-07-05 07:37:27 +02001701 ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
Janosch Frank2c46e972018-07-13 11:28:18 +01001702 PROT_READ, GMAP_NOTIFY_SHADOW);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001703 up_read(&parent->mm->mmap_sem);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001704 spin_lock(&parent->shadow_lock);
1705 new->initialized = true;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001706 if (rc) {
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001707 list_del(&new->list);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001708 gmap_free(new);
David Hildenbrand0f7f8482016-03-08 12:30:46 +01001709 new = ERR_PTR(rc);
1710 }
1711 spin_unlock(&parent->shadow_lock);
1712 return new;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001713}
1714EXPORT_SYMBOL_GPL(gmap_shadow);
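
/*
 * Error-handling sketch (illustrative): gmap_shadow() returns either a
 * usable gmap or an ERR_PTR(), never NULL, so callers check:
 *
 *	sg = gmap_shadow(parent, asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);
 *
 * -EAGAIN indicates a race with another creation or an invalidation;
 * the request can simply be retried.
 */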
1715
1716/**
1717 * gmap_shadow_r2t - create an empty shadow region 2 table
1718 * @sg: pointer to the shadow guest address space structure
1719 * @saddr: faulting address in the shadow gmap
1720 * @r2t: parent gmap address of the region 2 table to get shadowed
David Hildenbrand3218f702016-04-18 16:22:24 +02001721 * @fake: r2t references contiguous guest memory block, not a r2t
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001722 *
1723 * The r2t parameter specifies the address of the source table. The
1724 * four pages of the source table are made read-only in the parent gmap
1725 * address space. A write to the source table area @r2t will automatically
1726 * remove the shadow r2 table and all of its descendants.
1727 *
1728 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1729 * shadow table structure is incomplete, -ENOMEM if out of memory and
1730 * -EFAULT if an address in the parent gmap could not be resolved.
1731 *
1732 * Called with sg->mm->mmap_sem in read.
1733 */
David Hildenbrand3218f702016-04-18 16:22:24 +02001734int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1735 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001736{
1737 unsigned long raddr, origin, offset, len;
1738 unsigned long *s_r2t, *table;
1739 struct page *page;
1740 int rc;
1741
1742 BUG_ON(!gmap_is_shadow(sg));
1743 /* Allocate a shadow region second table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001744 page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001745 if (!page)
1746 return -ENOMEM;
1747 page->index = r2t & _REGION_ENTRY_ORIGIN;
David Hildenbrand3218f702016-04-18 16:22:24 +02001748 if (fake)
1749 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001750 s_r2t = (unsigned long *) page_to_phys(page);
1751 /* Install shadow region second table */
1752 spin_lock(&sg->guest_table_lock);
1753 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1754 if (!table) {
1755 rc = -EAGAIN; /* Race with unshadow */
1756 goto out_free;
1757 }
1758 if (!(*table & _REGION_ENTRY_INVALID)) {
1759 rc = 0; /* Already established */
1760 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001761 } else if (*table & _REGION_ENTRY_ORIGIN) {
1762 rc = -EAGAIN; /* Race with shadow */
1763 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001764 }
1765 crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001766 /* mark as invalid as long as the parent table is not protected */
1767 *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
1768 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001769 if (sg->edat_level >= 1)
1770 *table |= (r2t & _REGION_ENTRY_PROTECT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001771 list_add(&page->lru, &sg->crst_list);
David Hildenbrand3218f702016-04-18 16:22:24 +02001772 if (fake) {
1773 /* nothing to protect for fake tables */
1774 *table &= ~_REGION_ENTRY_INVALID;
1775 spin_unlock(&sg->guest_table_lock);
1776 return 0;
1777 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001778 spin_unlock(&sg->guest_table_lock);
1779 /* Make r2t read-only in parent gmap page table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001780 raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001781 origin = r2t & _REGION_ENTRY_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001782 offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1783 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
David Hildenbrand5c528db2018-01-23 22:26:18 +01001784 rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
David Hildenbrand998f6372016-03-08 12:23:38 +01001785 spin_lock(&sg->guest_table_lock);
1786 if (!rc) {
1787 table = gmap_table_walk(sg, saddr, 4);
1788 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1789 (unsigned long) s_r2t)
1790 rc = -EAGAIN; /* Race with unshadow */
1791 else
1792 *table &= ~_REGION_ENTRY_INVALID;
1793 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001794 gmap_unshadow_r2t(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001795 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001796 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001797 return rc;
1798out_free:
1799 spin_unlock(&sg->guest_table_lock);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001800 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001801 return rc;
1802}
1803EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
1804
1805/**
1806 * gmap_shadow_r3t - create a shadow region 3 table
1807 * @sg: pointer to the shadow guest address space structure
1808 * @saddr: faulting address in the shadow gmap
1809 * @r3t: parent gmap address of the region 3 table to get shadowed
David Hildenbrand3218f702016-04-18 16:22:24 +02001810 * @fake: r3t references contiguous guest memory block, not a r3t
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001811 *
1812 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1813 * shadow table structure is incomplete, -ENOMEM if out of memory and
1814 * -EFAULT if an address in the parent gmap could not be resolved.
1815 *
1816 * Called with sg->mm->mmap_sem in read.
1817 */
David Hildenbrand3218f702016-04-18 16:22:24 +02001818int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1819 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001820{
1821 unsigned long raddr, origin, offset, len;
1822 unsigned long *s_r3t, *table;
1823 struct page *page;
1824 int rc;
1825
1826 BUG_ON(!gmap_is_shadow(sg));
1827 /* Allocate a shadow region third table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001828 page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001829 if (!page)
1830 return -ENOMEM;
1831 page->index = r3t & _REGION_ENTRY_ORIGIN;
David Hildenbrand3218f702016-04-18 16:22:24 +02001832 if (fake)
1833 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001834 s_r3t = (unsigned long *) page_to_phys(page);
1835 /* Install shadow region third table */
1836 spin_lock(&sg->guest_table_lock);
1837 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1838 if (!table) {
1839 rc = -EAGAIN; /* Race with unshadow */
1840 goto out_free;
1841 }
1842 if (!(*table & _REGION_ENTRY_INVALID)) {
1843 rc = 0; /* Already established */
1844 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001845 } else if (*table & _REGION_ENTRY_ORIGIN) {
1846 rc = -EAGAIN; /* Race with shadow */
 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001847 }
1848 crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001849 /* mark as invalid as long as the parent table is not protected */
1850 *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
1851 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001852 if (sg->edat_level >= 1)
1853 *table |= (r3t & _REGION_ENTRY_PROTECT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001854 list_add(&page->lru, &sg->crst_list);
David Hildenbrand3218f702016-04-18 16:22:24 +02001855 if (fake) {
1856 /* nothing to protect for fake tables */
1857 *table &= ~_REGION_ENTRY_INVALID;
1858 spin_unlock(&sg->guest_table_lock);
1859 return 0;
1860 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001861 spin_unlock(&sg->guest_table_lock);
1862 /* Make r3t read-only in parent gmap page table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001863 raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001864 origin = r3t & _REGION_ENTRY_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001865 offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1866 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
David Hildenbrand5c528db2018-01-23 22:26:18 +01001867 rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
David Hildenbrand998f6372016-03-08 12:23:38 +01001868 spin_lock(&sg->guest_table_lock);
1869 if (!rc) {
1870 table = gmap_table_walk(sg, saddr, 3);
1871 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1872 (unsigned long) s_r3t)
1873 rc = -EAGAIN; /* Race with unshadow */
1874 else
1875 *table &= ~_REGION_ENTRY_INVALID;
1876 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001877 gmap_unshadow_r3t(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001878 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001879 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001880 return rc;
1881out_free:
1882 spin_unlock(&sg->guest_table_lock);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001883 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001884 return rc;
1885}
1886EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1887
1888/**
1889 * gmap_shadow_sgt - create a shadow segment table
1890 * @sg: pointer to the shadow guest address space structure
1891 * @saddr: faulting address in the shadow gmap
1892 * @sgt: parent gmap address of the segment table to get shadowed
David Hildenbrand18b898092016-04-18 13:42:05 +02001893 * @fake: sgt references contiguous guest memory block, not a sgt
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001894 *
1895 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1896 * shadow table structure is incomplete, -ENOMEM if out of memory and
1897 * -EFAULT if an address in the parent gmap could not be resolved.
1898 *
1899 * Called with sg->mm->mmap_sem in read.
1900 */
David Hildenbrand18b898092016-04-18 13:42:05 +02001901int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1902 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001903{
1904 unsigned long raddr, origin, offset, len;
1905 unsigned long *s_sgt, *table;
1906 struct page *page;
1907 int rc;
1908
David Hildenbrand18b898092016-04-18 13:42:05 +02001909 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001910 /* Allocate a shadow segment table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001911 page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001912 if (!page)
1913 return -ENOMEM;
1914 page->index = sgt & _REGION_ENTRY_ORIGIN;
David Hildenbrand18b898092016-04-18 13:42:05 +02001915 if (fake)
1916 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001917 s_sgt = (unsigned long *) page_to_phys(page);
1918 /* Install shadow segment table */
1919 spin_lock(&sg->guest_table_lock);
1920 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1921 if (!table) {
1922 rc = -EAGAIN; /* Race with unshadow */
1923 goto out_free;
1924 }
1925 if (!(*table & _REGION_ENTRY_INVALID)) {
1926 rc = 0; /* Already established */
1927 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01001928 } else if (*table & _REGION_ENTRY_ORIGIN) {
1929 rc = -EAGAIN; /* Race with shadow */
1930 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001931 }
1932 crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
David Hildenbrand998f6372016-03-08 12:23:38 +01001933 /* mark as invalid as long as the parent table is not protected */
1934 *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
1935 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001936 if (sg->edat_level >= 1)
1937 *table |= sgt & _REGION_ENTRY_PROTECT;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001938 list_add(&page->lru, &sg->crst_list);
David Hildenbrand18b898092016-04-18 13:42:05 +02001939 if (fake) {
1940 /* nothing to protect for fake tables */
1941 *table &= ~_REGION_ENTRY_INVALID;
1942 spin_unlock(&sg->guest_table_lock);
1943 return 0;
1944 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001945 spin_unlock(&sg->guest_table_lock);
1946 /* Make sgt read-only in parent gmap page table */
Heiko Carstensf1c11742017-07-05 07:37:27 +02001947 raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001948 origin = sgt & _REGION_ENTRY_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02001949 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1950 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
David Hildenbrand5c528db2018-01-23 22:26:18 +01001951 rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
David Hildenbrand998f6372016-03-08 12:23:38 +01001952 spin_lock(&sg->guest_table_lock);
1953 if (!rc) {
1954 table = gmap_table_walk(sg, saddr, 2);
1955 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1956 (unsigned long) s_sgt)
1957 rc = -EAGAIN; /* Race with unshadow */
1958 else
1959 *table &= ~_REGION_ENTRY_INVALID;
1960 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001961 gmap_unshadow_sgt(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001962 }
David Hildenbrand998f6372016-03-08 12:23:38 +01001963 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001964 return rc;
1965out_free:
1966 spin_unlock(&sg->guest_table_lock);
Heiko Carstensf1c11742017-07-05 07:37:27 +02001967 __free_pages(page, CRST_ALLOC_ORDER);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001968 return rc;
1969}
1970EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
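
/*
 * Fault-resolution sketch (hypothetical caller; the table origins and
 * fake flags would be read from the parent guest's tables): for a guest
 * with a full 4-level table, missing shadow levels are installed
 * top-down, stopping at the first error; -EAGAIN restarts the walk.
 *
 *	rc = gmap_shadow_r2t(sg, saddr, r2t_origin, fake);
 *	if (!rc)
 *		rc = gmap_shadow_r3t(sg, saddr, r3t_origin, fake);
 *	if (!rc)
 *		rc = gmap_shadow_sgt(sg, saddr, sgt_origin, fake);
 */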
1971
1972/**
1973 * gmap_shadow_pgt_lookup - find a shadow page table
1974 * @sg: pointer to the shadow guest address space structure
1975 * @saddr: the address in the shadow guest address space
1976 * @pgt: parent gmap address of the page table to get shadowed
1977 * @dat_protection: if the pgtable is marked as protected by dat
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001978 * @fake: pgt references contiguous guest memory block, not a pgtable
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001979 *
1980 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1981 * table was not found.
1982 *
1983 * Called with sg->mm->mmap_sem in read.
1984 */
1985int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001986 unsigned long *pgt, int *dat_protection,
1987 int *fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01001988{
1989 unsigned long *table;
1990 struct page *page;
1991 int rc;
1992
1993 BUG_ON(!gmap_is_shadow(sg));
1994 spin_lock(&sg->guest_table_lock);
1995 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1996 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1997 /* Shadow page tables are full pages (pte+pgste) */
1998 page = pfn_to_page(*table >> PAGE_SHIFT);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02001999 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002000 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02002001 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002002 rc = 0;
2003 } else {
2004 rc = -EAGAIN;
2005 }
2006 spin_unlock(&sg->guest_table_lock);
2007 return rc;
2008
2009}
2010EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
2011
2012/**
2013 * gmap_shadow_pgt - instantiate a shadow page table
2014 * @sg: pointer to the shadow guest address space structure
2015 * @saddr: faulting address in the shadow gmap
2016 * @pgt: parent gmap address of the page table to get shadowed
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02002017 * @fake: pgt references contiguous guest memory block, not a pgtable
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002018 *
2019 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2020 * shadow table structure is incomplete, -ENOMEM if out of memory and
2021 * -EFAULT if an address in the parent gmap could not be resolved.
2022 *
2023 * Called with sg->mm->mmap_sem in read.
2024 */
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02002025int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
2026 int fake)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002027{
2028 unsigned long raddr, origin;
2029 unsigned long *s_pgt, *table;
2030 struct page *page;
2031 int rc;
2032
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02002033 BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002034 /* Allocate a shadow page table */
2035 page = page_table_alloc_pgste(sg->mm);
2036 if (!page)
2037 return -ENOMEM;
2038 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02002039 if (fake)
2040 page->index |= GMAP_SHADOW_FAKE_TABLE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002041 s_pgt = (unsigned long *) page_to_phys(page);
2042 /* Install shadow page table */
2043 spin_lock(&sg->guest_table_lock);
2044 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2045 if (!table) {
2046 rc = -EAGAIN; /* Race with unshadow */
2047 goto out_free;
2048 }
2049 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
2050 rc = 0; /* Already established */
2051 goto out_free;
David Hildenbrand998f6372016-03-08 12:23:38 +01002052 } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
2053 rc = -EAGAIN; /* Race with shadow */
2054 goto out_free;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002055 }
David Hildenbrand998f6372016-03-08 12:23:38 +01002056 /* mark as invalid as long as the parent table is not protected */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002057 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
David Hildenbrand998f6372016-03-08 12:23:38 +01002058 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002059 list_add(&page->lru, &sg->pt_list);
David Hildenbrandfd8d4e32016-04-18 13:24:52 +02002060 if (fake) {
2061 /* nothing to protect for fake tables */
2062 *table &= ~_SEGMENT_ENTRY_INVALID;
2063 spin_unlock(&sg->guest_table_lock);
2064 return 0;
2065 }
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002066 spin_unlock(&sg->guest_table_lock);
2067 /* Make pgt read-only in parent gmap page table (not the pgste) */
Heiko Carstensf1c11742017-07-05 07:37:27 +02002068 raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002069 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
David Hildenbrand5c528db2018-01-23 22:26:18 +01002070 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
David Hildenbrand998f6372016-03-08 12:23:38 +01002071 spin_lock(&sg->guest_table_lock);
2072 if (!rc) {
2073 table = gmap_table_walk(sg, saddr, 1);
2074 if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
2075 (unsigned long) s_pgt)
2076 rc = -EAGAIN; /* Race with unshadow */
2077 else
2078 *table &= ~_SEGMENT_ENTRY_INVALID;
2079 } else {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002080 gmap_unshadow_pgt(sg, raddr);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002081 }
David Hildenbrand998f6372016-03-08 12:23:38 +01002082 spin_unlock(&sg->guest_table_lock);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002083 return rc;
2084out_free:
2085 spin_unlock(&sg->guest_table_lock);
2086 page_table_free_pgste(page);
2087 return rc;
2088
2089}
2090EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
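
/*
 * Usage sketch (hypothetical caller): a shadow fault first tries the
 * lookup; only when the page table is not yet shadowed is it read from
 * the guest's segment table entry and installed:
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN) {
 *		... fetch pgt_origin and fake from the guest segment entry ...
 *		rc = gmap_shadow_pgt(sg, saddr, pgt_origin, fake);
 *	}
 */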
2091
2092/**
2093 * gmap_shadow_page - create a shadow page mapping
2094 * @sg: pointer to the shadow guest address space structure
2095 * @saddr: faulting address in the shadow gmap
David Hildenbranda9d23e72016-03-08 12:21:41 +01002096 * @pte: pte in parent gmap address space to get shadowed
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002097 *
2098 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2099 * shadow table structure is incomplete, -ENOMEM if out of memory and
2100 * -EFAULT if an address in the parent gmap could not be resolved.
2101 *
2102 * Called with sg->mm->mmap_sem in read.
2103 */
David Hildenbranda9d23e72016-03-08 12:21:41 +01002104int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002105{
2106 struct gmap *parent;
2107 struct gmap_rmap *rmap;
David Hildenbranda9d23e72016-03-08 12:21:41 +01002108 unsigned long vmaddr, paddr;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002109 spinlock_t *ptl;
2110 pte_t *sptep, *tptep;
David Hildenbrand01f71912016-06-13 10:49:04 +02002111 int prot;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002112 int rc;
2113
2114 BUG_ON(!gmap_is_shadow(sg));
2115 parent = sg->parent;
David Hildenbrand01f71912016-06-13 10:49:04 +02002116 prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002117
2118 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
2119 if (!rmap)
2120 return -ENOMEM;
2121 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
2122
2123 while (1) {
David Hildenbranda9d23e72016-03-08 12:21:41 +01002124 paddr = pte_val(pte) & PAGE_MASK;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002125 vmaddr = __gmap_translate(parent, paddr);
2126 if (IS_ERR_VALUE(vmaddr)) {
2127 rc = vmaddr;
2128 break;
2129 }
2130 rc = radix_tree_preload(GFP_KERNEL);
2131 if (rc)
2132 break;
2133 rc = -EAGAIN;
2134 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
2135 if (sptep) {
2136 spin_lock(&sg->guest_table_lock);
2137 /* Get page table pointer */
2138 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
2139 if (!tptep) {
2140 spin_unlock(&sg->guest_table_lock);
2141 gmap_pte_op_end(ptl);
2142 radix_tree_preload_end();
2143 break;
2144 }
David Hildenbranda9d23e72016-03-08 12:21:41 +01002145 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002146 if (rc > 0) {
2147 /* Success and a new mapping */
2148 gmap_insert_rmap(sg, vmaddr, rmap);
2149 rmap = NULL;
2150 rc = 0;
2151 }
2152 gmap_pte_op_end(ptl);
2153 spin_unlock(&sg->guest_table_lock);
2154 }
2155 radix_tree_preload_end();
2156 if (!rc)
2157 break;
David Hildenbrand01f71912016-06-13 10:49:04 +02002158 rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002159 if (rc)
2160 break;
2161 }
2162 kfree(rmap);
2163 return rc;
2164}
2165EXPORT_SYMBOL_GPL(gmap_shadow_page);
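
/*
 * Usage sketch (hypothetical caller): with the shadow page table in
 * place, the final mapping is made from the guest's pte; the pte value
 * would be read from the guest page table, with _PAGE_PROTECT or'ed in
 * when any higher translation level was protected:
 *
 *	rc = gmap_shadow_page(sg, saddr, pte);
 */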
2166
2167/**
2168 * gmap_shadow_notify - handle notifications for shadow gmap
2169 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address in the parent's address space
 * @gaddr: guest address that is mapped at @vmaddr in the parent gmap
 *
2170 * Called with sg->parent->shadow_lock.
2171 */
2172static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
Janosch Frankc0b4bd22017-12-13 13:53:22 +01002173 unsigned long gaddr)
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002174{
2175 struct gmap_rmap *rmap, *rnext, *head;
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002176 unsigned long start, end, bits, raddr;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002177
2178 BUG_ON(!gmap_is_shadow(sg));
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002179
2180 spin_lock(&sg->guest_table_lock);
2181 if (sg->removed) {
2182 spin_unlock(&sg->guest_table_lock);
2183 return;
2184 }
2185 /* Check for top level table */
2186 start = sg->orig_asce & _ASCE_ORIGIN;
Heiko Carstensf1c11742017-07-05 07:37:27 +02002187 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
David Hildenbrand3218f702016-04-18 16:22:24 +02002188 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2189 gaddr < end) {
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002190 /* The complete shadow table has to go */
2191 gmap_unshadow(sg);
2192 spin_unlock(&sg->guest_table_lock);
2193 list_del(&sg->list);
2194 gmap_put(sg);
2195 return;
2196 }
2197 /* Remove the page table tree from one specific entry */
Heiko Carstensf1c11742017-07-05 07:37:27 +02002198 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002199 gmap_for_each_rmap_safe(rmap, rnext, head) {
2200 bits = rmap->raddr & _SHADOW_RMAP_MASK;
2201 raddr = rmap->raddr ^ bits;
2202 switch (bits) {
2203 case _SHADOW_RMAP_REGION1:
2204 gmap_unshadow_r2t(sg, raddr);
2205 break;
2206 case _SHADOW_RMAP_REGION2:
2207 gmap_unshadow_r3t(sg, raddr);
2208 break;
2209 case _SHADOW_RMAP_REGION3:
2210 gmap_unshadow_sgt(sg, raddr);
2211 break;
2212 case _SHADOW_RMAP_SEGMENT:
2213 gmap_unshadow_pgt(sg, raddr);
2214 break;
2215 case _SHADOW_RMAP_PGTABLE:
2216 gmap_unshadow_page(sg, raddr);
2217 break;
2218 }
2219 kfree(rmap);
2220 }
2221 spin_unlock(&sg->guest_table_lock);
2222}
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002223
2224/**
2225 * ptep_notify - call all invalidation callbacks for a specific pte.
2226 * @mm: pointer to the process mm_struct
2227 * @vmaddr: virtual address in the process address space
2228 * @pte: pointer to the page table entry
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002229 * @bits: bits from the pgste that caused the notify call
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002230 *
2231 * This function is assumed to be called with the page table lock held
2232 * for the pte to notify.
2233 */
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002234void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2235 pte_t *pte, unsigned long bits)
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002236{
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002237 unsigned long offset, gaddr = 0;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002238 unsigned long *table;
Martin Schwidefsky4be130a2016-03-08 12:12:18 +01002239 struct gmap *gmap, *sg, *next;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002240
2241 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
Heiko Carstensf1c11742017-07-05 07:37:27 +02002242 offset = offset * (PAGE_SIZE / sizeof(pte_t));
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002243 rcu_read_lock();
2244 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2245 spin_lock(&gmap->guest_table_lock);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002246 table = radix_tree_lookup(&gmap->host_to_guest,
2247 vmaddr >> PMD_SHIFT);
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002248 if (table)
2249 gaddr = __gmap_segment_gaddr(table) + offset;
2250 spin_unlock(&gmap->guest_table_lock);
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002251 if (!table)
2252 continue;
2253
2254 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2255 spin_lock(&gmap->shadow_lock);
2256 list_for_each_entry_safe(sg, next,
2257 &gmap->children, list)
Janosch Frankc0b4bd22017-12-13 13:53:22 +01002258 gmap_shadow_notify(sg, vmaddr, gaddr);
Janosch Frank2fa5ed72017-02-08 08:59:56 +01002259 spin_unlock(&gmap->shadow_lock);
2260 }
2261 if (bits & PGSTE_IN_BIT)
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002262 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002263 }
Martin Schwidefsky8ecb1a52016-03-08 11:54:14 +01002264 rcu_read_unlock();
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002265}
2266EXPORT_SYMBOL_GPL(ptep_notify);
2267
Janosch Frank6a376272018-07-13 11:28:22 +01002268static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
2269 unsigned long gaddr)
2270{
2271 pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
2272 gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
2273}
2274
Janosch Frank0959e162018-07-17 13:21:22 +01002275/**
2276 * gmap_pmdp_xchg - exchange a gmap pmd with another
2277 * @gmap: pointer to the guest address space structure
2278 * @pmdp: pointer to the pmd entry
2279 * @new: replacement entry
2280 * @gaddr: the affected guest address
2281 *
2282 * This function is assumed to be called with the guest_table_lock
2283 * held.
2284 */
2285static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
2286 unsigned long gaddr)
2287{
2288 gaddr &= HPAGE_MASK;
2289 pmdp_notify_gmap(gmap, pmdp, gaddr);
2290 pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
2291 if (MACHINE_HAS_TLB_GUEST)
2292 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
2293 IDTE_GLOBAL);
2294 else if (MACHINE_HAS_IDTE)
2295 __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
2296 else
2297 __pmdp_csp(pmdp);
2298 *pmdp = new;
2299}
2300
Janosch Frank6a376272018-07-13 11:28:22 +01002301static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
2302 int purge)
2303{
2304 pmd_t *pmdp;
2305 struct gmap *gmap;
2306 unsigned long gaddr;
2307
2308 rcu_read_lock();
2309 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2310 spin_lock(&gmap->guest_table_lock);
2311 pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
2312 vmaddr >> PMD_SHIFT);
2313 if (pmdp) {
2314 gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
2315 pmdp_notify_gmap(gmap, pmdp, gaddr);
Janosch Frank0959e162018-07-17 13:21:22 +01002316 WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2317 _SEGMENT_ENTRY_GMAP_UC));
Janosch Frank6a376272018-07-13 11:28:22 +01002318 if (purge)
2319 __pmdp_csp(pmdp);
2320 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
2321 }
2322 spin_unlock(&gmap->guest_table_lock);
2323 }
2324 rcu_read_unlock();
2325}
2326
2327/**
2328 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
2329 * flushing
2330 * @mm: pointer to the process mm_struct
2331 * @vmaddr: virtual address in the process address space
2332 */
2333void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
2334{
2335 gmap_pmdp_clear(mm, vmaddr, 0);
2336}
2337EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
2338
2339/**
2340 * gmap_pmdp_csp - csp all affected guest pmd entries
2341 * @mm: pointer to the process mm_struct
2342 * @vmaddr: virtual address in the process address space
2343 */
2344void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
2345{
2346 gmap_pmdp_clear(mm, vmaddr, 1);
2347}
2348EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
2349
2350/**
2351 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
2352 * @mm: pointer to the process mm_struct
2353 * @vmaddr: virtual address in the process address space
2354 */
2355void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
2356{
2357 unsigned long *entry, gaddr;
2358 struct gmap *gmap;
2359 pmd_t *pmdp;
2360
2361 rcu_read_lock();
2362 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2363 spin_lock(&gmap->guest_table_lock);
2364 entry = radix_tree_delete(&gmap->host_to_guest,
2365 vmaddr >> PMD_SHIFT);
2366 if (entry) {
2367 pmdp = (pmd_t *)entry;
2368 gaddr = __gmap_segment_gaddr(entry);
2369 pmdp_notify_gmap(gmap, pmdp, gaddr);
Janosch Frank0959e162018-07-17 13:21:22 +01002370 WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2371 _SEGMENT_ENTRY_GMAP_UC));
Janosch Frank6a376272018-07-13 11:28:22 +01002372 if (MACHINE_HAS_TLB_GUEST)
2373 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2374 gmap->asce, IDTE_LOCAL);
2375 else if (MACHINE_HAS_IDTE)
2376 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
2377 *entry = _SEGMENT_ENTRY_EMPTY;
2378 }
2379 spin_unlock(&gmap->guest_table_lock);
2380 }
2381 rcu_read_unlock();
2382}
2383EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
2384
2385/**
2386 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
2387 * @mm: pointer to the process mm_struct
2388 * @vmaddr: virtual address in the process address space
2389 */
2390void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
2391{
2392 unsigned long *entry, gaddr;
2393 struct gmap *gmap;
2394 pmd_t *pmdp;
2395
2396 rcu_read_lock();
2397 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2398 spin_lock(&gmap->guest_table_lock);
2399 entry = radix_tree_delete(&gmap->host_to_guest,
2400 vmaddr >> PMD_SHIFT);
2401 if (entry) {
2402 pmdp = (pmd_t *)entry;
2403 gaddr = __gmap_segment_gaddr(entry);
2404 pmdp_notify_gmap(gmap, pmdp, gaddr);
Janosch Frank0959e162018-07-17 13:21:22 +01002405 WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2406 _SEGMENT_ENTRY_GMAP_UC));
Janosch Frank6a376272018-07-13 11:28:22 +01002407 if (MACHINE_HAS_TLB_GUEST)
2408 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2409 gmap->asce, IDTE_GLOBAL);
2410 else if (MACHINE_HAS_IDTE)
2411 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
2412 else
2413 __pmdp_csp(pmdp);
2414 *entry = _SEGMENT_ENTRY_EMPTY;
2415 }
2416 spin_unlock(&gmap->guest_table_lock);
2417 }
2418 rcu_read_unlock();
2419}
2420EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
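
/*
 * Call-site sketch (assumption: the real callers live in the s390 pmd
 * invalidation paths, not in this file): when the host invalidates a
 * pmd of a process that owns gmaps, the guest copies are flushed too:
 *
 *	if (mm_has_pgste(mm))
 *		gmap_pmdp_idte_global(mm, vmaddr);
 */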
2421
Janosch Frank0959e162018-07-17 13:21:22 +01002422/**
2423 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
2424 * @gmap: pointer to guest address space
2425 * @pmdp: pointer to the pmd to be tested
2426 * @gaddr: virtual address in the guest address space
2427 *
2428 * This function is assumed to be called with the guest_table_lock
2429 * held.
2430 */
Vasily Gorbikffbd2682019-07-17 19:41:09 +02002431static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2432 unsigned long gaddr)
Janosch Frank0959e162018-07-17 13:21:22 +01002433{
2434 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
2435 return false;
2436
2437 /* Already protected memory, which did not change is clean */
2438 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
2439 !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
2440 return false;
2441
2442 /* Clear UC indication and reset protection */
2443 pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
2444 gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
2445 return true;
2446}
2447
2448/**
2449 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
2450 * @gmap: pointer to guest address space
2451 * @bitmap: dirty bitmap for this pmd
2452 * @gaddr: virtual address in the guest address space
2453 * @vmaddr: virtual address in the host address space
2454 *
2455 * This function is assumed to be called with the guest_table_lock
2456 * held.
2457 */
2458void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
2459 unsigned long gaddr, unsigned long vmaddr)
2460{
2461 int i;
2462 pmd_t *pmdp;
2463 pte_t *ptep;
2464 spinlock_t *ptl;
2465
2466 pmdp = gmap_pmd_op_walk(gmap, gaddr);
2467 if (!pmdp)
2468 return;
2469
2470 if (pmd_large(*pmdp)) {
2471 if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
2472 bitmap_fill(bitmap, _PAGE_ENTRIES);
2473 } else {
2474 for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
2475 ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
2476 if (!ptep)
2477 continue;
2478 if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
2479 set_bit(i, bitmap);
2480 spin_unlock(ptl);
2481 }
2482 }
2483 gmap_pmd_op_end(gmap, pmdp);
2484}
2485EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
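
/*
 * Usage sketch (hypothetical dirty-logging caller): one call covers one
 * segment, i.e. _PAGE_ENTRIES (256) pages, which is why @bitmap has 4
 * unsigned longs; set bits are then forwarded to the caller's dirty log.
 * mark_dirty() is a stand-in name, not a real helper:
 *
 *	unsigned long bitmap[4] = {};
 *	int i;
 *
 *	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
 *	for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
 *		mark_dirty(gaddr + i * PAGE_SIZE);
 */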
2486
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002487static inline void thp_split_mm(struct mm_struct *mm)
2488{
2489#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2490 struct vm_area_struct *vma;
2491 unsigned long addr;
2492
2493 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2494 for (addr = vma->vm_start;
2495 addr < vma->vm_end;
2496 addr += PAGE_SIZE)
2497 follow_page(vma, addr, FOLL_SPLIT);
2498 vma->vm_flags &= ~VM_HUGEPAGE;
2499 vma->vm_flags |= VM_NOHUGEPAGE;
2500 }
2501 mm->def_flags |= VM_NOHUGEPAGE;
2502#endif
2503}
2504
2505/*
Christian Borntraegerfa41ba02017-08-24 12:55:08 +02002506 * Remove all empty zero pages from the mapping for lazy refaulting
2507 * - This must be called after mm->context.has_pgste is set, to avoid
2508 * future creation of zero pages
2509 * - This must be called after THP was enabled
2510 */
2511static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
2512 unsigned long end, struct mm_walk *walk)
2513{
2514 unsigned long addr;
2515
2516 for (addr = start; addr != end; addr += PAGE_SIZE) {
2517 pte_t *ptep;
2518 spinlock_t *ptl;
2519
2520 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2521 if (is_zero_pfn(pte_pfn(*ptep)))
2522 ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
2523 pte_unmap_unlock(ptep, ptl);
2524 }
2525 return 0;
2526}
2527
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002528static const struct mm_walk_ops zap_zero_walk_ops = {
2529 .pmd_entry = __zap_zero_pages,
2530};
Christian Borntraegerfa41ba02017-08-24 12:55:08 +02002531
2532/*
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002533 * switch on pgstes for its userspace process (for kvm)
2534 */
2535int s390_enable_sie(void)
2536{
2537 struct mm_struct *mm = current->mm;
2538
2539 /* Do we have pgstes? if yes, we are done */
2540 if (mm_has_pgste(mm))
2541 return 0;
2542 /* Fail if the page tables are 2K */
2543 if (!mm_alloc_pgste(mm))
2544 return -EINVAL;
2545 down_write(&mm->mmap_sem);
2546 mm->context.has_pgste = 1;
2547 /* split thp mappings and disable thp for future mappings */
2548 thp_split_mm(mm);
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002549 walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002550 up_write(&mm->mmap_sem);
2551 return 0;
2552}
2553EXPORT_SYMBOL_GPL(s390_enable_sie);
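
/*
 * Usage sketch (hypothetical KVM-side caller): enabling SIE is the first
 * step when a virtual machine is created, before any gmap is set up:
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *
 * A failure (-EINVAL) means the process runs with 2K page tables and
 * cannot provide the pgstes that SIE requires.
 */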
2554
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002555int gmap_mark_unmergeable(void)
2556{
2557 struct mm_struct *mm = current->mm;
2558 struct vm_area_struct *vma;
Christian Borntraeger7a265362020-03-27 08:06:42 +01002559 int ret;
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002560
2561 for (vma = mm->mmap; vma; vma = vma->vm_next) {
Christian Borntraeger7a265362020-03-27 08:06:42 +01002562 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
2563 MADV_UNMERGEABLE, &vma->vm_flags);
2564 if (ret)
2565 return ret;
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002566 }
2567 mm->def_flags &= ~VM_MERGEABLE;
2568 return 0;
2569}
2570EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
2571
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002572/*
2573 * Enable storage key handling from now on and initialize the storage
2574 * keys with the default key.
2575 */
Dominik Dingel964c2c02018-07-13 11:28:25 +01002576static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
2577 unsigned long next, struct mm_walk *walk)
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002578{
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002579 /* Clear storage key */
2580 ptep_zap_key(walk->mm, addr, pte);
2581 return 0;
2582}
2583
Dominik Dingel964c2c02018-07-13 11:28:25 +01002584static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
2585 unsigned long hmask, unsigned long next,
2586 struct mm_walk *walk)
2587{
2588 pmd_t *pmd = (pmd_t *)pte;
2589 unsigned long start, end;
Janosch Frank3afdfca2018-07-13 11:28:26 +01002590 struct page *page = pmd_page(*pmd);
Dominik Dingel964c2c02018-07-13 11:28:25 +01002591
2592 /*
2593 * The write check makes sure we do not set a key on shared
2594 * memory. This is needed as the walker does not differentiate
2595 * between actual guest memory and the process executable or
2596 * shared libraries.
2597 */
2598 if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
2599 !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
2600 return 0;
2601
2602 start = pmd_val(*pmd) & HPAGE_MASK;
2603 end = start + HPAGE_SIZE - 1;
2604 __storage_key_init_range(start, end);
Janosch Frank3afdfca2018-07-13 11:28:26 +01002605 set_bit(PG_arch_1, &page->flags);
Dominik Dingel964c2c02018-07-13 11:28:25 +01002606 return 0;
2607}
2608
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002609static const struct mm_walk_ops enable_skey_walk_ops = {
2610 .hugetlb_entry = __s390_enable_skey_hugetlb,
2611 .pte_entry = __s390_enable_skey_pte,
2612};
2613
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002614int s390_enable_skey(void)
2615{
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002616 struct mm_struct *mm = current->mm;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002617 int rc = 0;
2618
2619 down_write(&mm->mmap_sem);
Janosch Frank55531b72018-02-15 16:33:47 +01002620 if (mm_uses_skeys(mm))
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002621 goto out_up;
2622
Janosch Frank55531b72018-02-15 16:33:47 +01002623 mm->context.uses_skeys = 1;
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002624 rc = gmap_mark_unmergeable();
2625 if (rc) {
2626 mm->context.uses_skeys = 0;
2627 goto out_up;
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002628 }
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002629 walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002630
2631out_up:
2632 up_write(&mm->mmap_sem);
2633 return rc;
2634}
2635EXPORT_SYMBOL_GPL(s390_enable_skey);
2636
2637/*
2638 * Reset CMMA state, make all pages stable again.
2639 */
2640static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2641 unsigned long next, struct mm_walk *walk)
2642{
2643 ptep_zap_unused(walk->mm, addr, pte, 1);
2644 return 0;
2645}
2646
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002647static const struct mm_walk_ops reset_cmma_walk_ops = {
2648 .pte_entry = __s390_reset_cmma,
2649};
2650
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002651void s390_reset_cmma(struct mm_struct *mm)
2652{
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002653 down_write(&mm->mmap_sem);
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002654 walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +01002655 up_write(&mm->mmap_sem);
2656}
2657EXPORT_SYMBOL_GPL(s390_reset_cmma);
Christian Borntraeger12748002019-12-16 10:48:11 -05002658
2659/*
2660 * make inaccessible pages accessible again
2661 */
2662static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
2663 unsigned long next, struct mm_walk *walk)
2664{
2665 pte_t pte = READ_ONCE(*ptep);
2666
2667 if (pte_present(pte))
2668 WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK));
2669 return 0;
2670}
2671
2672static const struct mm_walk_ops reset_acc_walk_ops = {
2673 .pte_entry = __s390_reset_acc,
2674};
2675
2676#include <linux/sched/mm.h>
2677void s390_reset_acc(struct mm_struct *mm)
2678{
2679 /*
2680 * we might be called during
2681 * reset: we walk the pages and clear
2682 * close of all kvm file descriptors: we walk the pages and clear
2683 * exit of process on fd closure: vma already gone, do nothing
2684 */
2685 if (!mmget_not_zero(mm))
2686 return;
2687 down_read(&mm->mmap_sem);
2688 walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
2689 up_read(&mm->mmap_sem);
2690 mmput(mm);
2691}
2692EXPORT_SYMBOL_GPL(s390_reset_acc);