KVM: arm64: Add kvm_pgtable_stage2_find_range()
Since the host stage 2 will be identity mapped, and since it will own
most of memory, it would be preferable for performance to use large
block mappings whenever possible. To ease this, introduce a new helper
in the KVM page-table code which allows searching for large ranges of
available IPA space. This will be used in the host memory abort path to
greedily idmap large portions of the PA space.
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-32-qperret@google.com
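
For illustration, a minimal sketch (not part of this patch) of how a host
memory abort handler might consume the new helper. Only
kvm_pgtable_stage2_find_range() and struct kvm_mem_range come from this
change; handle_host_mem_abort_range() and host_stage2_map_range() are
hypothetical names standing in for the caller introduced later in the
series.

/*
 * Illustrative sketch only: handle_host_mem_abort_range() and
 * host_stage2_map_range() are hypothetical; the real host abort path
 * is wired up by later patches.
 */
static int handle_host_mem_abort_range(struct kvm_pgtable *pgt, u64 fault_ipa,
				       enum kvm_pgtable_prot prot)
{
	struct kvm_mem_range range = {
		.start	= 0,			/* widest possible window... */
		.end	= BIT(pgt->ia_bits),	/* ...covering the whole IPA space */
	};
	int ret;

	/*
	 * Shrink the range to the largest block-aligned window around
	 * fault_ipa whose existing mappings (if any) are compatible with
	 * the requested permissions.
	 */
	ret = kvm_pgtable_stage2_find_range(pgt, fault_ipa, prot, &range);
	if (ret)
		return ret;

	/* Greedily idmap the whole range in one go (hypothetical helper). */
	return host_stage2_map_range(pgt, range.start,
				     range.end - range.start, prot);
}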
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f4a514a..dc6ef2c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -48,6 +48,8 @@
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
KVM_PTE_LEAF_ATTR_HI_S2_XN)
+#define KVM_PTE_LEAF_ATTR_S2_IGNORED GENMASK(58, 55)
+
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(63, 56)
#define KVM_MAX_OWNER_ID 1
@@ -77,15 +79,20 @@ static bool kvm_phys_is_valid(u64 phys)
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
}
-static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
+static bool kvm_level_supports_block_mapping(u32 level)
{
- u64 granule = kvm_granule_size(level);
-
/*
* Reject invalid block mappings and don't bother with 4TB mappings for
* 52-bit PAs.
*/
- if (level == 0 || (PAGE_SIZE != SZ_4K && level == 1))
+ return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+}
+
+static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
+{
+ u64 granule = kvm_granule_size(level);
+
+ if (!kvm_level_supports_block_mapping(level))
return false;
if (granule > (end - addr))
@@ -1053,3 +1060,77 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
pgt->pgd = NULL;
}
+
+#define KVM_PTE_LEAF_S2_COMPAT_MASK (KVM_PTE_LEAF_ATTR_S2_PERMS | \
+ KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \
+ KVM_PTE_LEAF_ATTR_S2_IGNORED)
+
+static int stage2_check_permission_walker(u64 addr, u64 end, u32 level,
+ kvm_pte_t *ptep,
+ enum kvm_pgtable_walk_flags flag,
+ void * const arg)
+{
+ kvm_pte_t old_attr, pte = *ptep, *new_attr = arg;
+
+ /*
+ * Compatible mappings are either invalid and owned by the page-table
+ * owner (whose id is 0), or valid with matching permission attributes.
+ */
+ if (kvm_pte_valid(pte)) {
+ old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK;
+ if (old_attr != *new_attr)
+ return -EEXIST;
+ } else if (pte) {
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_prot prot,
+ struct kvm_mem_range *range)
+{
+ kvm_pte_t attr;
+ struct kvm_pgtable_walker check_perm_walker = {
+ .cb = stage2_check_permission_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = &attr,
+ };
+ u64 granule, start, end;
+ u32 level;
+ int ret;
+
+ ret = stage2_set_prot_attr(prot, &attr);
+ if (ret)
+ return ret;
+ attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;
+
+ for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) {
+ granule = kvm_granule_size(level);
+ start = ALIGN_DOWN(addr, granule);
+ end = start + granule;
+
+ if (!kvm_level_supports_block_mapping(level))
+ continue;
+
+ if (start < range->start || range->end < end)
+ continue;
+
+ /*
+ * Check the presence of existing mappings with incompatible
+ * permissions within the current block range, and try one level
+ * deeper if one is found.
+ */
+ ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker);
+ if (ret != -EEXIST)
+ break;
+ }
+
+ if (!ret) {
+ range->start = start;
+ range->end = end;
+ }
+
+ return ret;
+}