@@ -103,6 +103,7 @@ struct kvm_vm {
struct sparsebit *vpages_mapped;
bool has_irqchip;
bool pgd_created;
+ bool has_split_va_space;
vm_paddr_t ucall_mmio_addr;
vm_paddr_t pgd;
vm_vaddr_t gdt;
@@ -186,6 +186,43 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
+/*
+ * Initializes vm->vpages_valid to match the canonical VA space of the
+ * architecture.
+ *
+ * Most architectures split the range addressed by a single page table into a
+ * low and high region based on the MSB of the VA. On architectures with this
+ * behavior, the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
+ *
+ * arm64 is a bit different from the rest of the crowd, as the low and high
+ * regions of the VA space are addressed by distinct paging structures
+ * (TTBR{0,1}_EL1). KVM selftests on arm64 only use TTBR0_EL1, meaning that we
+ * only have a low VA region. As there is no VA split based on the MSB, the VA
+ * region spans [0, 2^va_bits).
+ */
+static void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+{
+ sparsebit_num_t contig_va_bits = vm->va_bits;
+ sparsebit_num_t nr_contig_pages;
+
+ /*
+ * Depending on the architecture, the VA space could be split by the MSB
+ * into low and high regions. When that is the case, each region is
+ * addressable with va_bits - 1 bits.
+ */
+ if (vm->has_split_va_space)
+ contig_va_bits--;
+
+ nr_contig_pages = (1ULL << contig_va_bits) >> vm->page_shift;
+
+ sparsebit_set_num(vm->vpages_valid, 0, nr_contig_pages);
+
+ if (vm->has_split_va_space)
+ sparsebit_set_num(vm->vpages_valid,
+ (-(1ULL << contig_va_bits)) >> vm->page_shift,
+ nr_contig_pages);
+}
+
struct kvm_vm *____vm_create(enum vm_guest_mode mode)
{
struct kvm_vm *vm;
@@ -268,17 +305,17 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode)
#ifdef __aarch64__
if (vm->pa_bits != 40)
vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
+
+ /* selftests use TTBR0 only, meaning there is a single VA region. */
+ vm->has_split_va_space = false;
+#else
+ vm->has_split_va_space = true;
#endif
vm_open(vm);
- /* Limit to VA-bit canonical virtual addresses. */
vm->vpages_valid = sparsebit_alloc();
- sparsebit_set_num(vm->vpages_valid,
- 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
- sparsebit_set_num(vm->vpages_valid,
- (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
- (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
+ vm_vaddr_populate_bitmap(vm);
/* Limit physical addresses to PA-bits. */
vm->max_gfn = vm_compute_max_gfn(vm);
An interesting feature of the Arm architecture is that the stage-1 MMU supports two distinct VA regions, controlled by TTBR{0,1}_EL1. As KVM selftests on arm64 only use TTBR0_EL1, the VA space is constrained to [0, 2^(va_bits)). This is different from other architectures, which allow addressing the low and high regions of the VA space from a single page table.

KVM selftests' VA space allocator presumes the valid address range is split between low and high memory based on the MSB, which of course is a poor match for arm64's TTBR0 region. Add a helper that correctly handles both addressing schemes, with a comment describing each.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 .../selftests/kvm/include/kvm_util_base.h |  1 +
 tools/testing/selftests/kvm/lib/kvm_util.c | 49 ++++++++++++++++---
 2 files changed, 44 insertions(+), 6 deletions(-)
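For reference, the following is a minimal standalone sketch (not part of the patch) that mirrors the arithmetic vm_vaddr_populate_bitmap() performs: it prints the page-index ranges that would be marked valid in vpages_valid for a given va_bits/page_shift, with and without a split VA space. The helper name show_valid_ranges() and the example values (48 VA bits, 4KiB pages) are illustrative assumptions, not part of KVM selftests.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the low/high region math from vm_vaddr_populate_bitmap(). */
static void show_valid_ranges(unsigned int va_bits, unsigned int page_shift,
			      bool has_split_va_space)
{
	unsigned int contig_va_bits = va_bits;
	uint64_t nr_contig_pages, high_start;

	/* With a split VA space, each region covers va_bits - 1 bits. */
	if (has_split_va_space)
		contig_va_bits--;

	nr_contig_pages = (1ULL << contig_va_bits) >> page_shift;

	/* The low region always starts at page index 0. */
	printf("low : [%#llx, %#llx)\n", 0ULL,
	       (unsigned long long)nr_contig_pages);

	if (!has_split_va_space)
		return;

	/*
	 * The high region starts at the first "negative" canonical address,
	 * converted to a page index.
	 */
	high_start = (-(1ULL << contig_va_bits)) >> page_shift;
	printf("high: [%#llx, %#llx)\n", (unsigned long long)high_start,
	       (unsigned long long)(high_start + nr_contig_pages));
}

int main(void)
{
	show_valid_ranges(48, 12, true);	/* split VA space, e.g. x86_64 */
	show_valid_ranges(48, 12, false);	/* single region, e.g. arm64 TTBR0 */
	return 0;
}

With the split layout the two printed ranges correspond to the canonical halves described in the helper's comment; with the single-region layout only [0, 2^va_bits) is marked valid.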