@@ -618,6 +618,7 @@ struct iommufd_vdevice {
u64 id; /* per-vIOMMU virtual ID */
struct mutex tsm_lock;
bool tsm_bound;
+ bool trusted_dma_enabled;
};
int iommufd_vdevice_tsm_bind(struct iommufd_vdevice *vdev);
@@ -224,6 +224,37 @@ int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
return rc;
}
+static int iommufd_vdevice_enable_trusted_dma(struct iommufd_vdevice *vdev)
+{
+ struct iommufd_viommu *viommu = vdev->viommu;
+ int rc;
+
+ if (vdev->trusted_dma_enabled)
+ return 0;
+
+ if (viommu->ops->setup_trusted_vdev) {
+ rc = viommu->ops->setup_trusted_vdev(viommu, vdev->id);
+ if (rc)
+ return rc;
+ }
+
+ vdev->trusted_dma_enabled = true;
+ return 0;
+}
+
+static void iommufd_vdevice_disable_trusted_dma(struct iommufd_vdevice *vdev)
+{
+ struct iommufd_viommu *viommu = vdev->viommu;
+
+ if (!vdev->trusted_dma_enabled)
+ return;
+
+ if (viommu->ops->remove_trusted_vdev)
+ viommu->ops->remove_trusted_vdev(viommu, vdev->id);
+
+ vdev->trusted_dma_enabled = false;
+}
+
int iommufd_vdevice_tsm_bind(struct iommufd_vdevice *vdev)
{
struct kvm *kvm;
@@ -241,12 +272,19 @@ int iommufd_vdevice_tsm_bind(struct iommufd_vdevice *vdev)
goto out_unlock;
}
- rc = pci_tsm_bind(to_pci_dev(vdev->dev), kvm, vdev->id);
+ rc = iommufd_vdevice_enable_trusted_dma(vdev);
if (rc)
goto out_unlock;
+ rc = pci_tsm_bind(to_pci_dev(vdev->dev), kvm, vdev->id);
+ if (rc)
+ goto out_disable_trusted_dma;
+
vdev->tsm_bound = true;
+ goto out_unlock;
+out_disable_trusted_dma:
+ iommufd_vdevice_disable_trusted_dma(vdev);
out_unlock:
mutex_unlock(&vdev->tsm_lock);
return rc;
@@ -259,6 +297,7 @@ void iommufd_vdevice_tsm_unbind(struct iommufd_vdevice *vdev)
goto out_unlock;
pci_tsm_unbind(to_pci_dev(vdev->dev));
+ iommufd_vdevice_disable_trusted_dma(vdev);
vdev->tsm_bound = false;
out_unlock:
@@ -136,6 +136,8 @@ struct iommufd_viommu_ops {
const struct iommu_user_data *user_data);
int (*cache_invalidate)(struct iommufd_viommu *viommu,
struct iommu_user_data_array *array);
+ int (*setup_trusted_vdev)(struct iommufd_viommu *viommu, u64 vdev_id);
+ void (*remove_trusted_vdev)(struct iommufd_viommu *viommu, u64 vdev_id);
};
#if IS_ENABLED(CONFIG_IOMMUFD)
Add handlers for setting up/removing trusted IOMMU configurations
against a vdevice. IOMMUFD calls these handlers on TSM bind/unbind.
Most vendors extend their trusted IOMMU engine for private device
assignment and therefore require extra IOMMU configuration on TSM
bind. E.g. Intel TDX Connect requires the host to build extra trusted
Device Context Table entries (but not present), while AMD requires
clearing the Domain-ID in the non-secure DTE.

The existing DMA setup flow in IOMMUFD is driven by userspace: it
usually starts with allocating a domain and then attaching that domain
to the device. Trusted DMA setup, by contrast, is embedded in the TSM
bind/unbind IOCTLs. This is because platform secure firmware imposes
various configuration ordering requirements on trusted DMA. E.g. Intel
TDX Connect enforces that the trusted IOPT is detached after TDI STOP
but before TDI metadata free. Using coarser uAPIs like TSM bind/unbind
that wrap all trusted configurations prevents these low-level
complexities from propagating to userspace.

A coarser uAPI means userspace loses the flexibility to attach
different domains to the trusted part of the device, and it cannot
operate on the trusted domain at all. That does not seem to be a
problem because the VMM is outside the TCB, so the secure firmware
either disallows the VMM from touching the trusted domain or only
allows a fixed configuration set. E.g. TDX Connect enforces that all
assigned devices in the same VM share the same trusted domain, and it
specifies every value of the trusted Context Table entries. So simply
setting up everything for trusted DMA in the IOMMU driver is a
reasonable choice.

OPEN: Should these handlers be viommu ops or vdevice ops?

Signed-off-by: Xu Yilun <yilun.xu@linux.intel.com>
---
 drivers/iommu/iommufd/iommufd_private.h |  1 +
 drivers/iommu/iommufd/viommu.c          | 41 ++++++++++++++++++++++++-
 include/linux/iommufd.h                 |  2 ++
 3 files changed, 43 insertions(+), 1 deletion(-)
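
For illustration only, below is a minimal sketch of how a vendor vIOMMU
driver might wire up the two new ops. Everything named "my_*" (the
driver struct, the hardware helpers, and the ops table) is hypothetical
and not part of this patch; a real driver would program its own trusted
translation structures as required by its platform's secure firmware.

#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/iommufd.h>

/* Hypothetical vendor driver state wrapping the core viommu object. */
struct my_viommu {
	struct iommufd_viommu core;
	/* ... vendor-specific trusted IOMMU state ... */
};

/* Placeholders for vendor-specific hardware programming. */
int my_hw_setup_trusted_context(struct my_viommu *mv, u64 vdev_id);
void my_hw_remove_trusted_context(struct my_viommu *mv, u64 vdev_id);

static int my_viommu_setup_trusted_vdev(struct iommufd_viommu *viommu,
					u64 vdev_id)
{
	struct my_viommu *mv = container_of(viommu, struct my_viommu, core);

	/*
	 * Build the vendor's trusted IOMMU configuration for this virtual
	 * device ID, e.g. install not-yet-present trusted context table
	 * entries or scrub the non-secure entry, per the secure firmware's
	 * requirements. my_hw_setup_trusted_context() stands in for that
	 * vendor-specific work.
	 */
	return my_hw_setup_trusted_context(mv, vdev_id);
}

static void my_viommu_remove_trusted_vdev(struct iommufd_viommu *viommu,
					  u64 vdev_id)
{
	struct my_viommu *mv = container_of(viommu, struct my_viommu, core);

	/* Tear down whatever setup_trusted_vdev() established. */
	my_hw_remove_trusted_context(mv, vdev_id);
}

static const struct iommufd_viommu_ops my_viommu_ops = {
	/* .destroy, .alloc_domain_nested, .cache_invalidate, ... omitted */
	.setup_trusted_vdev	= my_viommu_setup_trusted_vdev,
	.remove_trusted_vdev	= my_viommu_remove_trusted_vdev,
};

Note that iommufd_vdevice_enable_trusted_dma() in this patch treats a
missing setup_trusted_vdev op as success, so drivers that need no extra
trusted IOMMU configuration can simply leave both ops NULL.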