@@ -369,8 +369,19 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
}
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
- struct mtk_iommu_data *data)
+ struct mtk_iommu_data *data,
+ unsigned int domid)
{
+ const struct mtk_iommu_iova_region *region;
+
+	/* Use the existing domain as there is only one pgtable here. */
+ if (data->m4u_dom) {
+ dom->iop = data->m4u_dom->iop;
+ dom->cfg = data->m4u_dom->cfg;
+ dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
+ goto update_iova_region;
+ }
+
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
@@ -394,8 +405,11 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
/* Update our support page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
- dom->domain.geometry.aperture_start = 0;
- dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+update_iova_region:
+ /* Update the iova region for this domain */
+ region = data->plat_data->iova_region + domid;
+ dom->domain.geometry.aperture_start = region->iova_base;
+ dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
dom->domain.geometry.force_aperture = true;
return 0;
}
@@ -441,7 +455,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
return domid;
if (!dom->data) {
- if (mtk_iommu_domain_finalise(dom, data))
+ if (mtk_iommu_domain_finalise(dom, data, domid))
return -ENODEV;
dom->data = data;
}
@@ -569,6 +583,7 @@ static void mtk_iommu_release_device(struct device *dev)
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ struct iommu_group *group;
int domid;
if (!data)
@@ -578,15 +593,15 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
if (domid < 0)
return ERR_PTR(domid);
- /* All the client devices are in the same m4u iommu-group */
- if (!data->m4u_group) {
- data->m4u_group = iommu_group_alloc();
- if (IS_ERR(data->m4u_group))
- dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ group = data->m4u_group[domid];
+ if (!group) {
+ group = iommu_group_alloc();
+ if (!IS_ERR(group))
+ data->m4u_group[domid] = group;
} else {
- iommu_group_ref_get(data->m4u_group);
+ iommu_group_ref_get(group);
}
- return data->m4u_group;
+ return group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -22,6 +22,8 @@
#define MTK_LARB_COM_MAX 8
#define MTK_LARB_SUBCOM_MAX 4
+#define MTK_IOMMU_GROUP_MAX 8
+
struct mtk_iommu_suspend_reg {
union {
u32 standard_axi_mode;/* v1 */
@@ -67,7 +69,7 @@ struct mtk_iommu_data {
phys_addr_t protect_base; /* protect memory base */
struct mtk_iommu_suspend_reg reg;
struct mtk_iommu_domain *m4u_dom;
- struct iommu_group *m4u_group;
+ struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX];
bool enable_4GB;
spinlock_t tlb_lock; /* lock for tlb range flush */
Some HW IP(ex: CCU) require the special iova range. That means the iova got from dma_alloc_attrs for that devices must locate in his special range. In this patch, we prepare a iommu group(domain) for each a iova range requirement. Meanwhile we still use one pagetable which support 16GB iova. After this patch, If the iova range of a master is over 4G, the master should: a) Declare its special dma-ranges in its dtsi node. For example, If we preassign the iova 4G-8G for vcodec, then the vcodec dtsi node should add this: /* * iova start at 0x1_0000_0000, pa still start at 0x4000_0000 * size is 0x1_0000_0000. */ dma-ranges = <0x1 0x0 0x0 0x40000000 0x1 0x0>; /* 4G ~ 8G */ Note: we don't have a actual bus concept here. the master doesn't have its special parent node, thus this dma-ranges can only be put in the master's node. b) Update the dma_mask: dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33)); Signed-off-by: Yong Wu <yong.wu@mediatek.com> --- drivers/iommu/mtk_iommu.c | 37 ++++++++++++++++++++++++++----------- drivers/iommu/mtk_iommu.h | 4 +++- 2 files changed, 29 insertions(+), 12 deletions(-)