@@ -391,7 +391,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	struct vfio_domain *domain, *d;
 	long unlocked = 0;
 
-	if (!dma->size)
+	if (!dma->size || dma->type != VFIO_IOVA_USER)
 		return;
 	/*
 	 * We use the IOMMU to track the physical addresses, otherwise we'd
@@ -727,6 +727,9 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 		dma = rb_entry(n, struct vfio_dma, node);
 		iova = dma->iova;
 
+		if (dma->type == VFIO_IOVA_RESERVED)
+			continue;
+
 		while (iova < dma->iova + dma->size) {
 			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
 			size_t size;
Before allowing the end-user to create VFIO_IOVA_RESERVED dma slots, let's
implement the expected behavior for removal and replay. Unlike user dma
slots, reserved slots do not systematically bind IOVAs to PAs, and PAs are
not pinned. VFIO just initializes the IOVA "aperture". IOVAs are allocated
outside of the VFIO framework, typically by the MSI layer, which is
responsible for freeing and unmapping them. The MSI mapping resources are
freed by the IOMMU driver on domain destruction.

Signed-off-by: Eric Auger <eric.auger@linaro.org>

---

v7 -> v8:
- do not destroy anything anymore, just bypass unmap/unpin and iommu_map
  on replay

---
 drivers/vfio/vfio_iommu_type1.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

-- 
1.9.1
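
[Editor's note] To make the bypass behavior concrete, here is a minimal
userspace sketch (plain C, not the kernel code) of the slot-type check
this patch adds to unmap/unpin and replay. The enum layout, struct fields
and helper names below are illustrative assumptions; only the names
VFIO_IOVA_USER and VFIO_IOVA_RESERVED come from the patch itself.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Assumed slot type discriminator; the real kernel definition may differ. */
enum vfio_iova_type {
	VFIO_IOVA_USER,		/* IOVA bound to pinned user pages */
	VFIO_IOVA_RESERVED,	/* IOVA aperture only, e.g. for MSI doorbells */
};

/* Simplified stand-in for struct vfio_dma, for illustration only. */
struct vfio_dma_sketch {
	uint64_t iova;
	uint64_t size;
	enum vfio_iova_type type;
};

/* Mirrors the early return added to vfio_unmap_unpin(): reserved slots
 * carry no pinned pages, so there is nothing to unpin or account. */
static bool should_unmap_unpin(const struct vfio_dma_sketch *dma)
{
	return dma->size && dma->type == VFIO_IOVA_USER;
}

/* Mirrors the skip added to vfio_iommu_replay(): reserved slots are not
 * replayed into a newly attached domain; the MSI layer maps them itself. */
static bool should_replay(const struct vfio_dma_sketch *dma)
{
	return dma->type != VFIO_IOVA_RESERVED;
}

int main(void)
{
	struct vfio_dma_sketch slots[] = {
		{ .iova = 0x10000,    .size = 0x1000, .type = VFIO_IOVA_USER },
		{ .iova = 0x8000000,  .size = 0x1000, .type = VFIO_IOVA_RESERVED },
	};

	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		printf("iova 0x%llx: unpin=%d replay=%d\n",
		       (unsigned long long)slots[i].iova,
		       should_unmap_unpin(&slots[i]),
		       should_replay(&slots[i]));
	return 0;
}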