@@ -484,6 +484,14 @@ void iopt_remove_access(struct io_pagetable *iopt,
struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

+/* iommufd_access for internal use */
+struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);
+#define iommufd_access_destroy_internal(ictx, access) \
+ iommufd_object_destroy_user(ictx, &(access)->obj)
+int iommufd_access_attach_internal(struct iommufd_access *access,
+ struct iommufd_ioas *ioas);
+#define iommufd_access_detach_internal(access) iommufd_access_detach(access)
+
struct iommufd_eventq {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
@@ -1084,7 +1084,39 @@ void iommufd_access_destroy_object(struct iommufd_object *obj)
if (access->ioas)
WARN_ON(iommufd_access_change_ioas(access, NULL));
mutex_unlock(&access->ioas_lock);
- iommufd_ctx_put(access->ictx);
+ if (access->ops)
+ iommufd_ctx_put(access->ictx);
+}
+
+static struct iommufd_access *__iommufd_access_create(struct iommufd_ctx *ictx)
+{
+ struct iommufd_access *access;
+
+ /*
+ * There is no uAPI for the access object, but to keep things symmetric
+ * use the object infrastructure anyhow.
+ */
+ access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
+ if (IS_ERR(access))
+ return access;
+
+ /* The calling driver is a user until iommufd_access_destroy() */
+ refcount_inc(&access->obj.users);
+ mutex_init(&access->ioas_lock);
+ return access;
+}
+
+struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx)
+{
+ struct iommufd_access *access;
+
+ access = __iommufd_access_create(ictx);
+ if (IS_ERR(access))
+ return access;
+ access->iova_alignment = PAGE_SIZE;
+
+ iommufd_object_finalize(ictx, &access->obj);
+ return access;
}

/**
@@ -1106,11 +1138,7 @@ iommufd_access_create(struct iommufd_ctx *ictx,
{
struct iommufd_access *access;
- /*
- * There is no uAPI for the access object, but to keep things symmetric
- * use the object infrastructure anyhow.
- */
- access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
+ access = __iommufd_access_create(ictx);
if (IS_ERR(access))
return access;
@@ -1122,13 +1150,10 @@ iommufd_access_create(struct iommufd_ctx *ictx,
else
access->iova_alignment = 1;
- /* The calling driver is a user until iommufd_access_destroy() */
- refcount_inc(&access->obj.users);
access->ictx = ictx;
iommufd_ctx_get(ictx);
iommufd_object_finalize(ictx, &access->obj);
*id = access->obj.id;
- mutex_init(&access->ioas_lock);
return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, "IOMMUFD");
@@ -1173,6 +1198,22 @@ int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, "IOMMUFD");

+int iommufd_access_attach_internal(struct iommufd_access *access,
+ struct iommufd_ioas *ioas)
+{
+ int rc;
+
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(access->ioas)) {
+ mutex_unlock(&access->ioas_lock);
+ return -EINVAL;
+ }
+
+ rc = iommufd_access_change_ioas(access, ioas);
+ mutex_unlock(&access->ioas_lock);
+ return rc;
+}
+
int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id)
{
int rc;
Now, access->ops can be NULL, to support an internal use case for the
new HW queue object. Since an access object in this case will be
allocated by an internal iommufd object, the refcount on the ictx
should be skipped, so as not to deadlock the release of the ictx: it
would otherwise wait for the release of the access, which happens
during the release of the internal object, which in turn could wait
for the release of the ictx:

    ictx --releases--> hw_queue --releases--> access
     ^                                        |
     |________________releases_______________v

Add a set of lightweight internal APIs to unlink the access from the
ictx:

    ictx --releases--> hw_queue --releases--> access

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/iommufd/iommufd_private.h |  8 ++++
 drivers/iommu/iommufd/device.c          | 59 +++++++++++++++++++++----
 2 files changed, 58 insertions(+), 9 deletions(-)
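
For reference, below is a rough sketch of how an internal consumer such
as the upcoming HW queue object might use these APIs end to end. The
my_hw_queue structure, its fields, and the helper names are hypothetical
and not part of this patch; the pin/unpin calls assume that
iommufd_access_pin_pages()/iommufd_access_unpin_pages() accept an access
whose ops is NULL, as the series allows:

/*
 * Rough usage sketch only: my_hw_queue, its fields, and these helpers
 * are hypothetical and not part of this patch.
 */
#include <linux/err.h>
#include <linux/iommufd.h>

#include "iommufd_private.h"

struct my_hw_queue {
	struct iommufd_access *access;
	struct page **pages;
	unsigned long base_iova;
	unsigned long length;
};

static int my_hw_queue_pin(struct my_hw_queue *hwq, struct iommufd_ctx *ictx,
			   struct iommufd_ioas *ioas)
{
	int rc;

	/* Internal access: no ops and no refcount taken on the ictx */
	hwq->access = iommufd_access_create_internal(ictx);
	if (IS_ERR(hwq->access))
		return PTR_ERR(hwq->access);

	rc = iommufd_access_attach_internal(hwq->access, ioas);
	if (rc)
		goto err_destroy;

	/* Assumes pin_pages tolerates an ops-less internal access */
	rc = iommufd_access_pin_pages(hwq->access, hwq->base_iova, hwq->length,
				      hwq->pages, IOMMUFD_ACCESS_RW_WRITE);
	if (rc)
		goto err_detach;
	return 0;

err_detach:
	iommufd_access_detach_internal(hwq->access);
err_destroy:
	iommufd_access_destroy_internal(ictx, hwq->access);
	return rc;
}

static void my_hw_queue_teardown(struct my_hw_queue *hwq,
				 struct iommufd_ctx *ictx)
{
	/* Unpin before detaching: a NULL-ops access has no ops->unmap */
	iommufd_access_unpin_pages(hwq->access, hwq->base_iova, hwq->length);
	iommufd_access_detach_internal(hwq->access);
	iommufd_access_destroy_internal(ictx, hwq->access);
}

The teardown order (unpin, then detach, then destroy) mirrors the setup
in reverse, since nothing will unmap pinned pages on the caller's behalf
during detach when access->ops is NULL.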