@@ -124,3 +124,7 @@ config HAVE_KVM_ARCH_GMEM_PREPARE
config HAVE_KVM_ARCH_GMEM_INVALIDATE
bool
depends on KVM_PRIVATE_MEM
+
+config KVM_GMEM_MAPPABLE
+	bool
+	select KVM_PRIVATE_MEM
@@ -671,9 +671,88 @@ bool kvm_slot_gmem_is_guest_mappable(struct kvm_memory_slot *slot, gfn_t gfn)
return gmem_is_guest_mappable(inode, pgoff);
}
+
+static vm_fault_t kvm_gmem_fault(struct vm_fault *vmf)
+{ /* Page-fault handler for host (userspace) mmap of a guest_memfd file. */
+	struct inode *inode = file_inode(vmf->vma->vm_file);
+	struct folio *folio;
+	vm_fault_t ret = VM_FAULT_LOCKED; /* on success the folio is returned locked and referenced */
+
+	filemap_invalidate_lock_shared(inode->i_mapping); /* hold off truncate/invalidate while we fault in */
+
+	folio = kvm_gmem_get_folio(inode, vmf->pgoff); /* presumably returns a locked, refcounted folio — confirm */
+	if (IS_ERR(folio)) {
+		ret = VM_FAULT_SIGBUS;
+		goto out_filemap; /* no folio acquired, nothing to release */
+	}
+
+	if (folio_test_hwpoison(folio)) {
+		ret = VM_FAULT_HWPOISON; /* propagate memory failure to userspace */
+		goto out_folio;
+	}
+
+	if (!gmem_is_mappable(inode, vmf->pgoff)) { /* offset not currently shared with the host */
+		ret = VM_FAULT_SIGBUS;
+		goto out_folio;
+	}
+
+	if (WARN_ON_ONCE(folio_test_guestmem(folio))) { /* a guestmem-flagged folio in a mappable range would be a bug */
+		ret = VM_FAULT_SIGBUS;
+		goto out_folio;
+	}
+
+	if (!folio_test_uptodate(folio)) { /* first touch: zero every page before exposing it */
+		unsigned long nr_pages = folio_nr_pages(folio);
+		unsigned long i;
+
+		for (i = 0; i < nr_pages; i++)
+			clear_highpage(folio_page(folio, i));
+
+		folio_mark_uptodate(folio);
+	}
+
+	vmf->page = folio_file_page(folio, vmf->pgoff); /* hand the locked page back to the fault core */
+
+out_folio:
+	if (ret != VM_FAULT_LOCKED) { /* error path: drop the lock and reference we hold */
+		folio_unlock(folio);
+		folio_put(folio);
+	}
+
+out_filemap:
+	filemap_invalidate_unlock_shared(inode->i_mapping);
+
+	return ret;
+}
+
+static const struct vm_operations_struct kvm_gmem_vm_ops = {
+	.fault = kvm_gmem_fault, /* populate pages on demand for host mmap */
+};
+
+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
+{ /* Allow userspace to mmap guest_memfd; shared mappings only. */
+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) !=
+	    (VM_SHARED | VM_MAYSHARE)) { /* reject private/COW mappings */
+		return -EINVAL;
+	}
+
+	file_accessed(file); /* update access time, as generic mmap paths do */
+	vm_flags_set(vma, VM_DONTDUMP); /* exclude guest memory from coredumps */
+	vma->vm_ops = &kvm_gmem_vm_ops;
+
+	return 0;
+}
+#else /* !CONFIG_KVM_GMEM_MAPPABLE */
+static int gmem_set_mappable(struct inode *inode, pgoff_t start, pgoff_t end)
+{ /* Stub: mappable support is compiled out; callers are config-gated. */
+	WARN_ON_ONCE(1); /* reached only on a caller-side gating bug */
+	return -EINVAL;
+}
+#define kvm_gmem_mmap NULL /* leave fops->mmap unset */
#endif /* CONFIG_KVM_GMEM_MAPPABLE */
static struct file_operations kvm_gmem_fops = {
+ .mmap = kvm_gmem_mmap,
.open = generic_file_open,
.release = kvm_gmem_release,
.fallocate = kvm_gmem_fallocate,
@@ -860,6 +939,14 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
goto err_gmem;
}
+ if (IS_ENABLED(CONFIG_KVM_GMEM_MAPPABLE)) {
+ err = gmem_set_mappable(file_inode(file), 0, size >> PAGE_SHIFT);
+ if (err) {
+ fput(file);
+ goto err_gmem;
+ }
+ }
+
kvm_get_kvm(kvm);
gmem->kvm = kvm;
xa_init(&gmem->bindings);