// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

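/*
 * Carve the memory donated by the host into contiguous regions for the hyp
 * vmemmap, the hypervisor stage-1 page-table and the host stage-2 page-table,
 * using the early allocator while the buddy allocator is not yet available.
 */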
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long vstart, vend, nr_pages;

	hyp_early_alloc_init(virt, size);

	hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
	nr_pages = (vend - vstart) >> PAGE_SHIFT;
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	return 0;
}

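/*
 * Rebuild the hypervisor stage-1 page-table from scratch using the early
 * allocator, then map the idmap, the vectors, the vmemmap backing pages, the
 * hyp text/rodata/bss, the donated memory pool itself, the per-CPU areas and
 * stacks, and finally the host sections shared with the hypervisor.
 */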
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

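	/* Map each CPU's per-cpu region and its page-sized hypervisor stack. */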
	for (i = 0; i < hyp_nr_cpus; i++) {
		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
		start = end - PAGE_SIZE;
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	/*
	 * Map the host's .bss and .rodata sections RO in the hypervisor, but
	 * transfer the ownership from the host to the hypervisor itself to
	 * make sure it can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see finalize_host_mappings()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
	if (ret)
		return ret;

	return 0;
}

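/*
 * Point each CPU's kvm_nvhe_init_params at the new hyp PGD and clean and
 * invalidate the structure to the point of coherency, as it is consumed
 * during the page-table switch through the idmap.
 */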
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

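/* Page-table memory callbacks wrapping the hyp buddy allocator ('hpool'). */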
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

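/*
 * Walker run over the hyp stage-1: take a reference on every visited
 * page-table page, and propagate the pKVM ownership state of each valid
 * leaf mapping into the host stage-2.
 */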
static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
					 kvm_pte_t *ptep,
					 enum kvm_pgtable_walk_flags flag,
					 void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	kvm_pte_t pte = *ptep;
	phys_addr_t phys;

	if (!kvm_pte_valid(pte))
		return 0;

	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	mm_ops->get_page(ptep);
	if (flag != KVM_PGTABLE_WALK_LEAF)
		return 0;

	if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(pte);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

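/*
 * Apply the walker above to the hyp stage-1 mappings of every memblock
 * region, so the host stage-2 reflects what the hypervisor owns or shares.
 */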
static int finalize_host_mappings(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = finalize_host_mappings_walker,
		.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg = pkvm_pgtable.mm_ops,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

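/*
 * Second half of pKVM initialisation, reached once the new page-table is
 * live: install the buddy allocator, prepare the host stage-2, finalize the
 * host mappings and return to the host with the result in its context.
 */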
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

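	/* Switch the hyp stage-1 page-table over to the buddy allocator. */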
	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = finalize_host_mappings();
	if (ret)
		goto out;

out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

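/*
 * Entry point for the __pkvm_init hypercall: carve up the memory donated by
 * the host, rebuild the hyp stage-1 mappings, then jump through the idmap
 * page to switch onto the new page-table and run __pkvm_init_finalise().
 */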
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}