// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

struct kvm_pgtable_walk_data {
	struct kvm_pgtable		*pgt;
	struct kvm_pgtable_walker	*walker;

	u64				addr;
	u64				end;
};

static u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
{
	u64 granule = kvm_granule_size(level);

	/*
	 * Reject invalid block mappings and don't bother with 4TB mappings for
	 * 52-bit PAs.
	 */
	if (level == 0 || (PAGE_SIZE != SZ_4K && level == 1))
		return false;

	if (granule > (end - addr))
		return false;

	return IS_ALIGNED(addr, granule) && IS_ALIGNED(phys, granule);
}

static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

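/*
 * Return the index of the page within the (possibly concatenated) PGD that
 * translates @addr.
 */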
static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}

static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
{
	return __kvm_pgd_page_idx(data->pgt, data->addr);
}

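/* Number of pages making up a PGD covering @ia_bits of input address. */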
static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

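/*
 * With 52-bit physical addresses (64k pages only), bits [51:48] of the
 * output address are held in bits [15:12] of the descriptor.
 */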
static u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

	return pte;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_set_invalid_pte(kvm_pte_t *ptep)
{
	kvm_pte_t pte = *ptep;
	WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID);
}

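/*
 * Install a table entry pointing at @childp. The store-release ensures
 * that the (zeroed) child table is visible before the entry referencing it.
 */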
static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
			      struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;

	WARN_ON(kvm_pte_valid(old));
	smp_store_release(ptep, pte);
}

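/*
 * Leaf entries are 'page' descriptors at the final level and 'block'
 * descriptors at all other levels.
 */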
static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
							   KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
				  u32 level, kvm_pte_t *ptep,
				  enum kvm_pgtable_walk_flags flag)
{
	struct kvm_pgtable_walker *walker = data->walker;
	return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level);

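/*
 * Visit a single entry: a table entry gets the TABLE_PRE callback, a
 * recursive walk of its child table and then the TABLE_POST callback,
 * while any other entry gets the LEAF callback. Callbacks only run when
 * the walker requested the corresponding flag, and a LEAF callback may
 * install a table, in which case the walk descends into it.
 */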
static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      kvm_pte_t *ptep, u32 level)
{
	int ret = 0;
	u64 addr = data->addr;
	kvm_pte_t *childp, pte = *ptep;
	bool table = kvm_pte_table(pte, level);
	enum kvm_pgtable_walk_flags flags = data->walker->flags;

	if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_PRE);
	}

	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_LEAF);
		pte = *ptep;
		table = kvm_pte_table(pte, level);
	}

	if (ret)
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = kvm_pte_follow(pte, data->pgt->mm_ops);
	ret = __kvm_pgtable_walk(data, childp, level + 1);
	if (ret)
		goto out;

	if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_POST);
	}

out:
	return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pte_t *ptep = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, ptep, level);
		if (ret)
			break;
	}

	return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	struct kvm_pgtable *pgt = data->pgt;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.pgt	= pgt,
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};

	return _kvm_pgtable_walk(&walk_data);
}

struct hyp_map_data {
	u64				phys;
	kvm_pte_t			attr;
	struct kvm_pgtable_mm_ops	*mm_ops;
};

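/*
 * Convert KVM_PGTABLE_PROT_* flags into stage-1 attributes for a hyp
 * mapping. All mappings must be readable, and executable mappings may be
 * neither writable nor device memory.
 */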
static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
				 struct hyp_map_data *data)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;
	} else {
		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	data->attr = attr;
	return 0;
}

static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				    kvm_pte_t *ptep, struct hyp_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return false;

	/* Tolerate KVM recreating the exact same mapping */
	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (old != new && !WARN_ON(kvm_pte_valid(old)))
		smp_store_release(ptep, new);

	data->phys += granule;
	return true;
}

static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			  enum kvm_pgtable_walk_flags flag, void * const arg)
{
	kvm_pte_t *childp;
	struct hyp_map_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
		return 0;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	kvm_set_table_pte(ptep, childp, mm_ops);
	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
		.mm_ops	= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_map_set_prot_attr(prot, &map_data);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

	pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits = va_bits;
	pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels;
	pgt->mm_ops = mm_ops;
	pgt->mmu = NULL;
	return 0;
}

static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			   enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;

	mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(pgt->pgd);
	pgt->pgd = NULL;
}

struct stage2_map_data {
	u64				phys;
	kvm_pte_t			attr;

	kvm_pte_t			*anchor;

	struct kvm_s2_mmu		*mmu;
	struct kvm_mmu_memory_cache	*memcache;

	struct kvm_pgtable_mm_ops	*mm_ops;
};

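/*
 * Convert KVM_PGTABLE_PROT_* flags into stage-2 attributes for a guest
 * mapping. Device memory cannot be mapped executable.
 */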
static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
				    struct stage2_map_data *data)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) :
			    PAGE_S2_MEMATTR(NORMAL);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

	if (!(prot & KVM_PGTABLE_PROT_X))
		attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
	else if (device)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	data->attr = attr;
	return 0;
}

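/*
 * Try to install a leaf entry at this level. Returns -E2BIG if the mapping
 * cannot be completed at this level (so the caller installs a table
 * instead), and -EAGAIN if an equivalent mapping, differing only in its
 * permission bits, is already in place.
 */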
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return -E2BIG;

	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (kvm_pte_valid(old)) {
		/*
		 * Skip updating the PTE if we are trying to recreate the exact
		 * same mapping or only change the access permissions. Instead,
		 * the vCPU will exit one more time from guest if still needed
		 * and then go through the path of relaxing permissions.
		 */
		if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)))
			return -EAGAIN;

		/*
		 * There's an existing different valid leaf entry, so perform
		 * break-before-make.
		 */
		kvm_set_invalid_pte(ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
		mm_ops->put_page(ptep);
	}

	smp_store_release(ptep, new);
	mm_ops->get_page(ptep);
	data->phys += granule;
	return 0;
}

static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
				     kvm_pte_t *ptep,
				     struct stage2_map_data *data)
{
	if (data->anchor)
		return 0;

	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
		return 0;

	kvm_set_invalid_pte(ptep);

	/*
	 * Invalidate the whole stage-2, as we may have numerous leaf
	 * entries below us which would otherwise need invalidating
	 * individually.
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
	data->anchor = ptep;
	return 0;
}

static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp, pte = *ptep;
	int ret;

	if (data->anchor) {
		if (kvm_pte_valid(pte))
			mm_ops->put_page(ptep);

		return 0;
	}

	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	if (kvm_pte_valid(pte)) {
		kvm_set_invalid_pte(ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
		mm_ops->put_page(ptep);
	}

	kvm_set_table_pte(ptep, childp, mm_ops);
	mm_ops->get_page(ptep);

	return 0;
}

static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	int ret = 0;

	if (!data->anchor)
		return 0;

	mm_ops->put_page(kvm_pte_follow(*ptep, mm_ops));
	mm_ops->put_page(ptep);

	if (data->anchor == ptep) {
		data->anchor = NULL;
		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
	}

	return ret;
}

/*
 * This is a little fiddly, as we use all three of the walk flags. The idea
 * is that the TABLE_PRE callback runs for table entries on the way down,
 * looking for table entries which we could conceivably replace with a
 * block entry for this mapping. If it finds one, then it sets the 'anchor'
 * field in 'struct stage2_map_data' to point at the table entry, before
 * clearing the entry to zero and descending into the now detached table.
 *
 * The behaviour of the LEAF callback then depends on whether or not the
 * anchor has been set. If not, then we're not using a block mapping higher
 * up the table and we perform the mapping at the existing leaves instead.
 * If, on the other hand, the anchor _is_ set, then we drop references to
 * all valid leaves so that the pages beneath the anchor can be freed.
 *
 * Finally, the TABLE_POST callback does nothing if the anchor has not
 * been set, but otherwise frees the page-table pages while walking back up
 * the page-table, installing the block entry when it revisits the anchor
 * pointer and clearing the anchor to NULL.
 */
static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			     enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct stage2_map_data *data = arg;

	switch (flag) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_TABLE_POST:
		return stage2_map_walk_table_post(addr, end, level, ptep, data);
	}

	return -EINVAL;
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   struct kvm_mmu_memory_cache *mc)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	ret = stage2_map_set_prot_attr(prot, &map_data);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

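/*
 * With FWB, the guest is guaranteed to access memory with cacheable
 * attributes, so no D-cache maintenance is required.
 */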
static void stage2_flush_dcache(void *addr, u64 size)
{
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	__flush_dcache_area(addr, size);
}

static bool stage2_pte_cacheable(kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
	return memattr == PAGE_S2_MEMATTR(NORMAL);
}

static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep, *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(pte))
		return 0;

	if (kvm_pte_table(pte, level)) {
		childp = kvm_pte_follow(pte, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pte)) {
		need_flush = true;
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	kvm_set_invalid_pte(ptep);
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
	mm_ops->put_page(ptep);

	if (need_flush) {
		stage2_flush_dcache(kvm_pte_follow(pte, mm_ops),
				    kvm_granule_size(level));
	}

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

struct stage2_attr_data {
	kvm_pte_t	attr_set;
	kvm_pte_t	attr_clr;
	kvm_pte_t	pte;
	u32		level;
};

static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	kvm_pte_t pte = *ptep;
	struct stage2_attr_data *data = arg;

	if (!kvm_pte_valid(pte))
		return 0;

	data->level = level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte)
		WRITE_ONCE(*ptep, pte);

	return 0;
}

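/*
 * Walk the range and update the attribute bits of any valid leaf entries,
 * optionally returning the original PTE and level of the last leaf visited
 * via @orig_pte and @level.
 */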
static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    u32 *level)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;
	return 0;
}

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				 &pte, NULL);
	dsb(ishst);
	return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
				 &pte, NULL);
	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot)
{
	int ret;
	u32 level;
	kvm_pte_t set = 0, clr = 0;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (prot & KVM_PGTABLE_PROT_X)
		clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
	if (!ret)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
	return ret;
}

static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte))
		return 0;

	stage2_flush_dcache(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt->mm_ops,
	};

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

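/*
 * The geometry of the stage-2 page-table (IPA size, start level and hence
 * the number of concatenated PGD pages) is derived from the VTCR_EL2 value
 * configured for this VM.
 */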
int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
			    struct kvm_pgtable_mm_ops *mm_ops)
{
	size_t pgd_sz;
	u64 vtcr = arch->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits = ia_bits;
	pgt->start_level = start_level;
	pgt->mm_ops = mm_ops;
	pgt->mmu = &arch->mmu;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}

static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	if (!kvm_pte_valid(pte))
		return 0;

	mm_ops->put_page(ptep);

	if (kvm_pte_table(pte, level))
		mm_ops->put_page(kvm_pte_follow(pte, mm_ops));

	return 0;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
	pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
	pgt->pgd = NULL;
}