@@ -25,6 +25,7 @@ enum {
GZVM_FUNC_MEMREGION_PURPOSE = 15,
GZVM_FUNC_SET_DTB_CONFIG = 16,
GZVM_FUNC_MAP_GUEST = 17,
+ GZVM_FUNC_MAP_GUEST_BLOCK = 18,
NR_GZVM_FUNC,
};
@@ -50,6 +51,7 @@ enum {
#define MT_HVC_GZVM_MEMREGION_PURPOSE GZVM_HCALL_ID(GZVM_FUNC_MEMREGION_PURPOSE)
#define MT_HVC_GZVM_SET_DTB_CONFIG GZVM_HCALL_ID(GZVM_FUNC_SET_DTB_CONFIG)
#define MT_HVC_GZVM_MAP_GUEST GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST)
+#define MT_HVC_GZVM_MAP_GUEST_BLOCK GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST_BLOCK)
#define GIC_V3_NR_LRS 16
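
The function IDs above are packed into SMCCC vendor-hypervisor call values before being issued. A minimal reconstruction of that packing, assuming GZVM_HCALL_ID wraps the standard ARM_SMCCC_CALL_VAL helper (the macro's actual definition lives elsewhere in the driver and is authoritative):

    #include <linux/arm-smccc.h>

    /*
     * Assumed shape: a 64-bit standard call owned by the vendor
     * hypervisor, with the GZVM_FUNC_* value as the function number.
     */
    #define GZVM_HCALL_ID(func)					\
    	ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64,	\
    			   ARM_SMCCC_OWNER_VENDOR_HYP, (func))
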
@@ -331,10 +331,11 @@ static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
fallthrough;
case GZVM_CAP_PVM_SET_PROTECTED_VM:
/*
- * To improve performance for protected VM, we have to populate VM's memory
- * before VM booting
+ * If the hypervisor doesn't support block-based demand paging, populate
+ * the VM's memory in advance to improve performance for the protected VM.
*/
- populate_all_mem_regions(gzvm);
+ if (gzvm->demand_page_gran == PAGE_SIZE)
+ populate_all_mem_regions(gzvm);
ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
return ret;
case GZVM_CAP_PVM_GET_PVMFW_SIZE:
@@ -351,12 +352,16 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp)
{
+ struct arm_smccc_res res = {0};
int ret;
switch (cap->cap) {
case GZVM_CAP_PROTECTED_VM:
ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
return ret;
+ case GZVM_CAP_BLOCK_BASED_DEMAND_PAGING:
+ ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
+ return ret;
default:
break;
}
@@ -397,3 +402,11 @@ int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
pfn, gfn, nr_pages, 0, 0, &res);
}
+
+int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages)
+{
+ struct arm_smccc_res res;
+
+ return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST_BLOCK, vm_id,
+ memslot_id, gfn, nr_pages, 0, 0, 0, &res);
+}
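
gzvm_hypcall_wrapper() itself is outside this hunk. For reference, a plausible sketch of its shape, assuming it forwards all eight registers to arm_smccc_hvc() and folds the hypervisor status in res->a0 into an errno (the error mapping here is illustrative, not the driver's):

    #include <linux/arm-smccc.h>
    #include <linux/errno.h>

    static int gzvm_hypcall_wrapper_sketch(unsigned long a0, unsigned long a1,
    				       unsigned long a2, unsigned long a3,
    				       unsigned long a4, unsigned long a5,
    				       unsigned long a6, unsigned long a7,
    				       struct arm_smccc_res *res)
    {
    	/* Issue the HVC; the hypervisor returns its status in res->a0 */
    	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
    	return res->a0 ? -EINVAL : 0;	/* assumed error mapping */
    }

Note that gzvm_arch_map_guest_block() needs one argument register fewer than gzvm_arch_map_guest(): the pfns travel through the shared demand-page buffer instead of a register.
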
@@ -114,6 +114,43 @@ int gzvm_vm_allocate_guest_page(struct gzvm_memslot *slot, u64 gfn, u64 *pfn)
return 0;
}
+static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
+{
+ u32 nr_entries = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE;
+ struct gzvm_memslot *memslot = &vm->memslot[memslot_id];
+ u64 start_gfn = ALIGN_DOWN(gfn, nr_entries);
+ u32 total_pages = memslot->npages;
+ u64 base_gfn = memslot->base_gfn;
+ u64 pfn, __gfn;
+ int ret, i;
+
+ /* If the demanded region is smaller than a block, shrink nr_entries */
+ if (start_gfn + nr_entries > base_gfn + total_pages)
+ nr_entries = base_gfn + total_pages - start_gfn;
+
+ mutex_lock(&vm->demand_paging_lock);
+ for (i = 0, __gfn = start_gfn; i < nr_entries; i++, __gfn++) {
+ ret = gzvm_vm_allocate_guest_page(memslot, __gfn, &pfn);
+ if (unlikely(ret)) {
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+ vm->demand_page_buffer[i] = pfn;
+ }
+
+ ret = gzvm_arch_map_guest_block(vm->vm_id, memslot_id, start_gfn,
+ nr_entries);
+ if (unlikely(ret)) {
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+err_unlock:
+ mutex_unlock(&vm->demand_paging_lock);
+
+ return ret;
+}
+
static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
{
int ret;
@@ -150,5 +188,8 @@ int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
if (unlikely(memslot_id < 0))
return -EFAULT;
- return handle_single_demand_page(vm, memslot_id, gfn);
+ if (vm->demand_page_gran == PAGE_SIZE)
+ return handle_single_demand_page(vm, memslot_id, gfn);
+ else
+ return handle_block_demand_page(vm, memslot_id, gfn);
}
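
Worked example of the block math: with 4 KiB pages, a 2 MiB block covers 512 entries, so a faulting gfn is rounded down to its enclosing 512-page boundary and the block is clamped at the end of the memslot. A standalone demonstration of the same arithmetic (the values and macro names here are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE	4096ULL			/* assumed 4 KiB pages */
    #define DEMO_BLOCK_SIZE	(2 * 1024 * 1024ULL)	/* 2 MiB block */
    #define DEMO_ALIGN_DOWN(x, a)	((x) / (a) * (a))

    int main(void)
    {
    	uint64_t nr_entries = DEMO_BLOCK_SIZE / DEMO_PAGE_SIZE;	/* 512 */
    	uint64_t base_gfn = 0x10000, npages = 0x10300;	/* slot ends at 0x20300 */
    	uint64_t gfn = 0x202ab;
    	uint64_t start_gfn = DEMO_ALIGN_DOWN(gfn, nr_entries);	/* 0x20200 */

    	/* Clamp at the end of the memslot, as handle_block_demand_page() does */
    	if (start_gfn + nr_entries > base_gfn + npages)
    		nr_entries = base_gfn + npages - start_gfn;	/* 256 here */

    	printf("map gfn 0x%llx + %llu pages\n",
    	       (unsigned long long)start_gfn, (unsigned long long)nr_entries);
    	return 0;
    }
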
@@ -294,6 +294,8 @@ static long gzvm_vm_ioctl(struct file *filp, unsigned int ioctl,
static void gzvm_destroy_vm(struct gzvm *gzvm)
{
+ size_t allocated_size;
+
pr_debug("VM-%u is going to be destroyed\n", gzvm->vm_id);
mutex_lock(&gzvm->lock);
@@ -306,6 +308,11 @@ static void gzvm_destroy_vm(struct gzvm *gzvm)
list_del(&gzvm->vm_list);
mutex_unlock(&gzvm_list_lock);
+ if (gzvm->demand_page_buffer) {
+ allocated_size = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE * sizeof(u64);
+ free_pages_exact(gzvm->demand_page_buffer, allocated_size);
+ }
+
mutex_unlock(&gzvm->lock);
kfree(gzvm);
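
The size freed above mirrors the allocation in setup_vm_demand_paging(): one u64 pfn slot per page of a block. With 4 KiB pages that is 512 * 8 = 4096 bytes, so the mailbox occupies exactly one page. A quick standalone check of the arithmetic (assumes 4 KiB pages; arm64 also supports 16K/64K page sizes, which would shrink the buffer):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t block = 2 * 1024 * 1024, page = 4096;
    	uint64_t buf = block / page * sizeof(uint64_t);

    	/* Prints: mailbox is 4096 bytes (1 page(s)) */
    	printf("mailbox is %llu bytes (%llu page(s))\n",
    	       (unsigned long long)buf, (unsigned long long)(buf / page));
    	return 0;
    }
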
@@ -325,6 +332,44 @@ static const struct file_operations gzvm_vm_fops = {
.llseek = noop_llseek,
};
+/**
+ * setup_vm_demand_paging - Query the hypervisor's demand page size and set it up
+ * @vm: gzvm instance to set up demand paging for
+ *
+ * Return: void
+ */
+static void setup_vm_demand_paging(struct gzvm *vm)
+{
+ u32 buf_size = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE * sizeof(u64);
+ struct gzvm_enable_cap cap = {0};
+ void *buffer;
+ int ret;
+
+ mutex_init(&vm->demand_paging_lock);
+ buffer = alloc_pages_exact(buf_size, GFP_KERNEL);
+ if (!buffer) {
+ /* Fall back to the default page size for demand paging */
+ vm->demand_page_gran = PAGE_SIZE;
+ vm->demand_page_buffer = NULL;
+ return;
+ }
+
+ cap.cap = GZVM_CAP_BLOCK_BASED_DEMAND_PAGING;
+ cap.args[0] = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE;
+ cap.args[1] = (__u64)virt_to_phys(buffer);
+ /* demand_page_buffer is freed when the VM is destroyed */
+ vm->demand_page_buffer = buffer;
+
+ ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
+ if (ret == 0) {
+ vm->demand_page_gran = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE;
+ } else {
+ vm->demand_page_gran = PAGE_SIZE;
+ vm->demand_page_buffer = NULL;
+ free_pages_exact(buffer, buf_size);
+ }
+}
+
static struct gzvm *gzvm_create_vm(unsigned long vm_type)
{
int ret;
@@ -358,6 +405,8 @@ static struct gzvm *gzvm_create_vm(unsigned long vm_type)
return ERR_PTR(ret);
}
+ setup_vm_demand_paging(gzvm);
+
mutex_lock(&gzvm_list_lock);
list_add(&gzvm->vm_list, &gzvm_list);
mutex_unlock(&gzvm_list_lock);
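
The setup degrades gracefully: if either the mailbox allocation or the capability hypercall fails, the VM simply stays on per-page demand paging. A condensed standalone restatement of that fallback logic, with stand-in types and a stubbed hypercall (the names here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct vm_sketch {
    	uint32_t demand_page_gran;
    	uint64_t *demand_page_buffer;
    };

    /* Stand-in for the GZVM_CAP_BLOCK_BASED_DEMAND_PAGING negotiation */
    static bool enable_block_paging_stub(struct vm_sketch *vm)
    {
    	return false;	/* pretend the hypervisor lacks the capability */
    }

    static void setup_demand_paging_sketch(struct vm_sketch *vm,
    				       uint32_t block, uint32_t page)
    {
    	vm->demand_page_buffer = calloc(block / page, sizeof(uint64_t));
    	if (vm->demand_page_buffer && enable_block_paging_stub(vm)) {
    		vm->demand_page_gran = block;	/* block mode negotiated */
    		return;
    	}
    	/* Fall back: per-page mode, and no mailbox is kept around */
    	free(vm->demand_page_buffer);
    	vm->demand_page_buffer = NULL;
    	vm->demand_page_gran = page;
    }
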
@@ -44,6 +44,8 @@
#define GZVM_VCPU_RUN_MAP_SIZE (PAGE_SIZE * 2)
+#define GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE (2 * 1024 * 1024) /* 2MB */
+
/* struct mem_region_addr_range - Identical to ffa memory constituent */
struct mem_region_addr_range {
/* the base IPA of the constituent memory region, aligned to 4 kiB */
@@ -106,6 +108,19 @@ struct gzvm {
struct srcu_struct irq_srcu;
/* lock for irq injection */
struct mutex irq_lock;
+
+ /*
+ * demand page granularity: how much memory we allocate for the VM in a
+ * single page fault
+ */
+ u32 demand_page_gran;
+ /* the mailbox for passing a whole block of pfns to the hypervisor */
+ u64 *demand_page_buffer;
+ /*
+ * lock to prevent multiple CPUs from using the demand page mailbox at
+ * the same time
+ */
+ struct mutex demand_paging_lock;
};
long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args);
@@ -126,6 +141,7 @@ int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id);
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
u64 nr_pages);
+int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp);
@@ -18,6 +18,8 @@
#define GZVM_CAP_VM_GPA_SIZE 0xa5
#define GZVM_CAP_PROTECTED_VM 0xffbadab1
+/* query whether the hypervisor supports block-based demand paging */
+#define GZVM_CAP_BLOCK_BASED_DEMAND_PAGING 0x9201
/* sub-commands put in args[0] for GZVM_CAP_PROTECTED_VM */
#define GZVM_CAP_PVM_SET_PVMFW_GPA 0