// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver is used to manage
 * the movement of guest pages between the normal memory managed by
 * the hypervisor (HV) and the secure memory managed by the Ultravisor (UV).
 *
 * Page-in and page-out requests from UV come to HV as hcalls, and
 * HV calls back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. However, such pages aren't represented by
 * device private memory and mappings to shared memory exist in both
 * UV and HV page tables.
 */

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * migrate_vma routines and page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly it serializes the
 * fault path, as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, currently the number of page-outs caused
 * by HV touching secure pages is very low. If and when UV supports
 * overcommitting, then we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 *                           as sync-points for page-in/out
 */

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls at PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to a secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles a page fault for a page belonging
 * to a secure guest, it sends it to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too, as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

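/*
 * An entry in a kvmppc_uvmem_slot's pfns[] array has bit 63 set when the
 * corresponding GFN is backed by a secure (device) PFN; the remaining bits
 * hold that device PFN.
 */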
#define KVMPPC_UVMEM_PFN        (1UL << 63)

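/*
 * Per-memslot tracking structure: one pfns[] entry per guest page frame,
 * recording which GFNs of the slot are currently backed by device PFNs.
 */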
struct kvmppc_uvmem_slot {
        struct list_head list;
        unsigned long nr_pfns;
        unsigned long base_pfn;
        unsigned long *pfns;
};

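/*
 * Private data of a device page, linking it back to the guest and the
 * GPA it shadows.
 */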
struct kvmppc_uvmem_page_pvt {
        struct kvm *kvm;
        unsigned long gpa;
        bool skip_page_out;
};

bool kvmppc_uvmem_available(void)
{
        /*
         * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
         * and our data structures have been initialized successfully.
         */
        return !!kvmppc_uvmem_bitmap;
}

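/*
 * Allocate and register the uvmem tracking structure for a memslot so that
 * secure GFNs within it can be tracked on the HV side.
 */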
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
        struct kvmppc_uvmem_slot *p;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
        if (!p->pfns) {
                kfree(p);
                return -ENOMEM;
        }
        p->nr_pfns = slot->npages;
        p->base_pfn = slot->base_gfn;

        mutex_lock(&kvm->arch.uvmem_lock);
        list_add(&p->list, &kvm->arch.uvmem_pfns);
        mutex_unlock(&kvm->arch.uvmem_lock);

        return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
        struct kvmppc_uvmem_slot *p, *next;

        mutex_lock(&kvm->arch.uvmem_lock);
        list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
                if (p->base_pfn == slot->base_gfn) {
                        vfree(p->pfns);
                        list_del(&p->list);
                        kfree(p);
                        break;
                }
        }
        mutex_unlock(&kvm->arch.uvmem_lock);
}

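/*
 * Record the device PFN that now backs a secure GFN.
 * Called with kvm->arch.uvmem_lock held.
 */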
static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
                                    struct kvm *kvm)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        unsigned long index = gfn - p->base_pfn;

                        p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN;
                        return;
                }
        }
}

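/*
 * Clear the device PFN record of a GFN that is no longer secure.
 * Called with kvm->arch.uvmem_lock held.
 */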
static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        p->pfns[gfn - p->base_pfn] = 0;
                        return;
                }
        }
}

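/*
 * Check whether a GFN is currently backed by a device (secure) PFN and,
 * if so, optionally return that PFN via @uvmem_pfn.
 */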
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
                                    unsigned long *uvmem_pfn)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        unsigned long index = gfn - p->base_pfn;

                        if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
                                if (uvmem_pfn)
                                        *uvmem_pfn = p->pfns[index] &
                                                     ~KVMPPC_UVMEM_PFN;
                                return true;
                        } else
                                return false;
                }
        }
        return false;
}

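/*
 * H_SVM_INIT_START: Begin the guest's transition to secure mode by
 * registering all of its memslots with the Ultravisor.
 */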
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int ret = H_SUCCESS;
        int srcu_idx;

        kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

        if (!kvmppc_uvmem_bitmap)
                return H_UNSUPPORTED;

        /* Only radix guests can be secure guests */
        if (!kvm_is_radix(kvm))
                return H_UNSUPPORTED;

        /* NAK the transition to secure if not enabled */
        if (!kvm->arch.svm_enabled)
                return H_AUTHORITY;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                if (kvmppc_uvmem_slot_init(kvm, memslot)) {
                        ret = H_PARAMETER;
                        goto out;
                }
                ret = uv_register_mem_slot(kvm->arch.lpid,
                                           memslot->base_gfn << PAGE_SHIFT,
                                           memslot->npages * PAGE_SIZE,
                                           0, memslot->id);
                if (ret < 0) {
                        kvmppc_uvmem_slot_free(kvm, memslot);
                        ret = H_PARAMETER;
                        goto out;
                }
        }
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

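/*
 * H_SVM_INIT_DONE: Mark the guest as having completed its transition
 * to secure mode.
 */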
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
        pr_info("LPID %d went secure\n", kvm->arch.lpid);
        return H_SUCCESS;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs
 * in the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
                             struct kvm *kvm, bool skip_page_out)
{
        int i;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn, uvmem_pfn;
        unsigned long gfn = free->base_gfn;

        for (i = free->npages; i; --i, ++gfn) {
                struct page *uvmem_page;

                mutex_lock(&kvm->arch.uvmem_lock);
                if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                        mutex_unlock(&kvm->arch.uvmem_lock);
                        continue;
                }

                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = skip_page_out;
                mutex_unlock(&kvm->arch.uvmem_lock);

                pfn = gfn_to_pfn(kvm, gfn);
                if (is_error_noslot_pfn(pfn))
                        continue;
                kvm_release_pfn_clean(pfn);
        }
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
        int srcu_idx;
        struct kvm_memory_slot *memslot;

        /*
         * Expect to be called only after INIT_START and before INIT_DONE.
         * If INIT_DONE was completed, use normal VM termination sequence.
         */
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return H_STATE;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        kvm_for_each_memslot(memslot, kvm_memslots(kvm))
                kvmppc_uvmem_drop_pages(memslot, kvm, false);

        srcu_read_unlock(&kvm->srcu, srcu_idx);

        kvm->arch.secure_guest = 0;
        uv_svm_terminate(kvm->arch.lpid);

        return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
        struct page *dpage = NULL;
        unsigned long bit, uvmem_pfn;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn_last, pfn_first;

        pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
        pfn_last = pfn_first +
                   (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);

        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
                                  pfn_last - pfn_first);
        if (bit >= (pfn_last - pfn_first))
                goto out;
        bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
        spin_unlock(&kvmppc_uvmem_bitmap_lock);

        pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
        if (!pvt)
                goto out_clear;

        uvmem_pfn = bit + pfn_first;
        kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

        pvt->gpa = gpa;
        pvt->kvm = kvm;

        dpage = pfn_to_page(uvmem_pfn);
        dpage->zone_device_data = pvt;
        get_page(dpage);
        lock_page(dpage);
        return dpage;
out_clear:
        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
        spin_unlock(&kvmppc_uvmem_bitmap_lock);
        return NULL;
}

/*
 * Alloc a PFN from private device memory pool and copy page from normal
 * memory to secure memory using UV_PAGE_IN uvcall.
 */
static int
kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
                   unsigned long end, unsigned long gpa, struct kvm *kvm,
                   unsigned long page_shift, bool *downgrade)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *spage;
        unsigned long pfn;
        struct page *dpage;
        int ret = 0;

        memset(&mig, 0, sizeof(mig));
        mig.vma = vma;
        mig.start = start;
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;
        mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

        /*
         * We come here with mmap_lock write lock held just for
         * ksm_madvise(), otherwise we only need read mmap_lock.
         * Hence downgrade to read lock once ksm_madvise() is done.
         */
        ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                          MADV_UNMERGEABLE, &vma->vm_flags);
        mmap_write_downgrade(kvm->mm);
        *downgrade = true;
        if (ret)
                return ret;

        ret = migrate_vma_setup(&mig);
        if (ret)
                return ret;

        if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
                ret = -1;
                goto out_finalize;
        }

        dpage = kvmppc_uvmem_get_page(gpa, kvm);
        if (!dpage) {
                ret = -1;
                goto out_finalize;
        }

        pfn = *mig.src >> MIGRATE_PFN_SHIFT;
        spage = migrate_pfn_to_page(*mig.src);
        if (spage)
                uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
                           page_shift);

        *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        migrate_vma_pages(&mig);
out_finalize:
        migrate_vma_finalize(&mig);
        return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, uses dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long
kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
{
        int ret = H_PARAMETER;
        struct page *uvmem_page;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        unsigned long gfn = gpa >> page_shift;
        int srcu_idx;
        unsigned long uvmem_pfn;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
        }

retry:
        mutex_unlock(&kvm->arch.uvmem_lock);
        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                goto out;

        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
                ret = H_SUCCESS;
        kvm_release_pfn_clean(pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * H_PAGE_IN_SHARED flag makes the page shared, which means that the same
 * memory is visible from both UV and HV.
 */
unsigned long
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
                     unsigned long flags, unsigned long page_shift)
{
        bool downgrade = false;
        unsigned long start, end;
        struct vm_area_struct *vma;
        int srcu_idx;
        unsigned long gfn = gpa >> page_shift;
        int ret;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (page_shift != PAGE_SHIFT)
                return H_P3;

        if (flags & ~H_PAGE_IN_SHARED)
                return H_P2;

        if (flags & H_PAGE_IN_SHARED)
                return kvmppc_share_page(kvm, gpa, page_shift);

        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        mmap_write_lock(kvm->mm);

        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;

        mutex_lock(&kvm->arch.uvmem_lock);
        /* Fail the page-in request of an already paged-in page */
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out_unlock;

        end = start + (1UL << page_shift);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
                goto out_unlock;

        if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
                                &downgrade))
                ret = H_SUCCESS;
out_unlock:
        mutex_unlock(&kvm->arch.uvmem_lock);
out:
        if (downgrade)
                mmap_read_unlock(kvm->mm);
        else
                mmap_write_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 */
static int
kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
                    unsigned long end, unsigned long page_shift,
                    struct kvm *kvm, unsigned long gpa)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *dpage, *spage;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        int ret = U_SUCCESS;

        memset(&mig, 0, sizeof(mig));
        mig.vma = vma;
        mig.start = start;
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;
        mig.pgmap_owner = &kvmppc_uvmem_pgmap;
        mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

        mutex_lock(&kvm->arch.uvmem_lock);
        /* The requested page is already paged-out, nothing to do */
        if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
                goto out;

        ret = migrate_vma_setup(&mig);
        if (ret)
                goto out;

        spage = migrate_pfn_to_page(*mig.src);
        if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
                goto out_finalize;

        if (!is_zone_device_page(spage))
                goto out_finalize;

        dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
        if (!dpage) {
                ret = -1;
                goto out_finalize;
        }

        lock_page(dpage);
        pvt = spage->zone_device_data;
        pfn = page_to_pfn(dpage);

        /*
         * This function is used in two cases:
         * - When HV touches a secure page, for which we do UV_PAGE_OUT
         * - When a secure page is converted to shared page, we *get*
         *   the page to essentially unmap the device page. In this
         *   case we skip page-out.
         */
        if (!pvt->skip_page_out)
                ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
                                  gpa, 0, page_shift);

        if (ret == U_SUCCESS)
                *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
        else {
                unlock_page(dpage);
                __free_page(dpage);
                goto out_finalize;
        }

        migrate_vma_pages(&mig);
out_finalize:
        migrate_vma_finalize(&mig);
out:
        mutex_unlock(&kvm->arch.uvmem_lock);
        return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing the UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

        if (kvmppc_svm_page_out(vmf->vma, vmf->address,
                                vmf->address + PAGE_SIZE, PAGE_SHIFT,
                                pvt->kvm, pvt->gpa))
                return VM_FAULT_SIGBUS;
        else
                return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when secure page becomes a normal page during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
        unsigned long pfn = page_to_pfn(page) -
                        (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
        struct kvmppc_uvmem_page_pvt *pvt;

        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
        spin_unlock(&kvmppc_uvmem_bitmap_lock);

        pvt = page->zone_device_data;
        page->zone_device_data = NULL;
        kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
        kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
        .page_free = kvmppc_uvmem_page_free,
        .migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
                      unsigned long flags, unsigned long page_shift)
{
        unsigned long gfn = gpa >> page_shift;
        unsigned long start, end;
        struct vm_area_struct *vma;
        int srcu_idx;
        int ret;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (page_shift != PAGE_SHIFT)
                return H_P3;

        if (flags)
                return H_P2;

        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        mmap_read_lock(kvm->mm);
        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;

        end = start + (1UL << page_shift);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
                goto out;

        if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
                ret = H_SUCCESS;
out:
        mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

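/*
 * Page the contents of a non-secure guest page into the Ultravisor via
 * UV_PAGE_IN without migrating it to device memory; GFNs that are already
 * secure are skipped.
 */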
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
        unsigned long pfn;
        int ret = U_SUCCESS;

        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out;

        ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
                         0, PAGE_SHIFT);
out:
        kvm_release_pfn_clean(pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);
        return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

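/*
 * Return the total amount of secure memory advertised by the firmware,
 * summed over the ibm,secure-memory nodes or, failing that, the deprecated
 * secure-memory-ranges property of the ibm,uv-firmware node.
 */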
static u64 kvmppc_get_secmem_size(void)
{
        struct device_node *np;
        int i, len;
        const __be32 *prop;
        u64 size = 0;

        /*
         * First try the new ibm,secure-memory nodes which supersede the
         * secure-memory-ranges property.
         * If we found some, no need to read the deprecated ones.
         */
        for_each_compatible_node(np, NULL, "ibm,secure-memory") {
                prop = of_get_property(np, "reg", &len);
                if (!prop)
                        continue;
                size += of_read_number(prop + 2, 2);
        }
        if (size)
                return size;

        np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
        if (!np)
                goto out;

        prop = of_get_property(np, "secure-memory-ranges", &len);
        if (!prop)
                goto out_put;

        for (i = 0; i < len / (sizeof(*prop) * 4); i++)
                size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
        of_node_put(np);
out:
        return size;
}

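/*
 * Set up the device-private memory that shadows the platform's secure
 * memory: reserve a free physical address range of the same size, remap it
 * as MEMORY_DEVICE_PRIVATE pages and allocate the device PFN bitmap.
 */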
int kvmppc_uvmem_init(void)
{
        int ret = 0;
        unsigned long size;
        struct resource *res;
        void *addr;
        unsigned long pfn_last, pfn_first;

        size = kvmppc_get_secmem_size();
        if (!size) {
                /*
                 * Don't fail the initialization of kvm-hv module if
                 * the platform doesn't export ibm,uv-firmware node.
                 * Let normal guests run on such PEF-disabled platform.
                 */
                pr_info("KVMPPC-UVMEM: No support for secure guests\n");
                goto out;
        }

        res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out;
        }

        kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
        kvmppc_uvmem_pgmap.res = *res;
        kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
        /* just one global instance: */
        kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
        addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
        if (IS_ERR(addr)) {
                ret = PTR_ERR(addr);
                goto out_free_region;
        }

        pfn_first = res->start >> PAGE_SHIFT;
        pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
        kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
                                      sizeof(unsigned long), GFP_KERNEL);
        if (!kvmppc_uvmem_bitmap) {
                ret = -ENOMEM;
                goto out_unmap;
        }

        pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
        return ret;
out_unmap:
        memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
        release_mem_region(res->start, size);
out:
        return ret;
}

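/*
 * Tear down the device-private memory region and free the PFN bitmap.
 * A no-op if kvmppc_uvmem_init() did not complete (kvmppc_uvmem_bitmap
 * is NULL).
 */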
void kvmppc_uvmem_free(void)
{
        if (!kvmppc_uvmem_bitmap)
                return;

        memunmap_pages(&kvmppc_uvmem_pgmap);
        release_mem_region(kvmppc_uvmem_pgmap.res.start,
                           resource_size(&kvmppc_uvmem_pgmap.res));
        kfree(kvmppc_uvmem_bitmap);
}