@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
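The new include provides the declaration of of_iommu_get_resv_regions(), which this patch wires up as the ->get_resv_regions() callback in the ops table at the bottom of the diff.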
@@ -471,6 +472,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
tegra_smmu_free_asid(smmu, as->id);

dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ as->pd_dma = 0;

as->smmu = NULL;
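Clearing as->pd_dma makes the unmapped state explicit: tegra_smmu_set_pde() below treats a DMA address of zero as "page directory not currently mapped" and sets up a temporary mapping on demand.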
@@ -534,6 +536,41 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
unsigned long offset = pd_index * sizeof(*pd);
+ bool unmap = false;
+
+ /*
+ * XXX Move this outside of this function. Perhaps add a struct
+ * iommu_domain parameter to ->{get,put}_resv_regions() so that
+ * the mapping can be done there.
+ *
+ * The problem here is that as->smmu is only known once we attach
+ * the domain to a device (we then look up the right SMMU instance
+ * via the per-device IOMMU data, dev_iommu_priv_get()). When the
+ * direct mappings for reserved regions are created, the domain has
+ * not been attached to a device yet, so we don't know which SMMU
+ * instance to use. We currently fix that up in ->apply_resv_regions()
+ * because that is the first time we have access to a struct device
+ * that will be used with the IOMMU domain. However, that's
+ * asymmetric and doesn't take care of the page directory mapping
+ * either, so we need to come up with something better.
+ */
+ if (WARN_ON_ONCE(as->pd_dma == 0)) {
+ as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, as->pd_dma)) {
+ as->pd_dma = 0;
+ return;
+ }
+
+ if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ as->pd_dma = 0;
+ return;
+ }
+
+ unmap = true;
+ }

/* Set the page directory entry first */
pd[pd_index] = value;
@@ -546,6 +583,12 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush_ptc(smmu, as->pd_dma, offset);
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
+
+ if (unmap) {
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ as->pd_dma = 0;
+ }
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
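Note that both error paths above now reset as->pd_dma to zero: dma_map_page() hands back an error cookie rather than zero on failure, so leaving it in place would defeat the "unmapped" check on the next call. The fallback itself follows the standard streaming-DMA sequence: map the page directory, let the CPU write the entry, sync and flush, then drop the temporary mapping again. The XXX comment sketches the longer-term fix; assuming the core passed the domain through to the reserved-region callbacks, the prototypes might look roughly like this (neither exists in mainline, this is purely illustrative):

	/*
	 * Hypothetical domain-aware callbacks, per the XXX comment: with the
	 * domain available, the page directory could be mapped once here
	 * instead of on demand in tegra_smmu_set_pde().
	 */
	void (*get_resv_regions)(struct device *dev, struct iommu_domain *domain,
				 struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct iommu_domain *domain,
				 struct list_head *list);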
@@ -846,7 +889,6 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
smmu = tegra_smmu_find(args.np);
if (smmu) {
err = tegra_smmu_configure(smmu, dev, &args);
-
if (err < 0) {
of_node_put(args.np);
return ERR_PTR(err);
@@ -864,6 +906,13 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
return &smmu->iommu;
}

+static void tegra_smmu_release_device(struct device *dev)
+{
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
+
+ put_device(smmu->dev);
+}
+
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
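The new release_device() callback drops the reference on the SMMU's struct device. This presumably balances a get_device() taken on the probe path (for instance in tegra_smmu_find() or tegra_smmu_configure()); the matching get is not visible in this excerpt.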
@@ -964,7 +1013,9 @@ static int tegra_smmu_of_xlate(struct device *dev,
static const struct iommu_ops tegra_smmu_ops = {
.domain_alloc = tegra_smmu_domain_alloc,
.probe_device = tegra_smmu_probe_device,
+ .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
+ .get_resv_regions = of_iommu_get_resv_regions,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
.default_domain_ops = &(const struct iommu_domain_ops) {
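of_iommu_get_resv_regions() builds the reserved-region list from device tree properties rather than from hard-coded addresses. For illustration only, a minimal hand-rolled callback of the same shape might look like the sketch below; the function name, address and size are invented, and the gfp_t argument to iommu_alloc_resv_region() only exists in newer kernels:

	static void example_get_resv_regions(struct device *dev,
					     struct list_head *list)
	{
		struct iommu_resv_region *region;

		/* Ask the core to identity-map one hypothetical carveout. */
		region = iommu_alloc_resv_region(0x80000000, SZ_16M,
						 IOMMU_READ | IOMMU_WRITE,
						 IOMMU_RESV_DIRECT, GFP_KERNEL);
		if (!region)
			return;

		list_add_tail(&region->list, list);
	}

When a domain is attached, the IOMMU core walks this list and creates the direct mappings, which is how tegra_smmu_set_pde() can end up being called before the page directory has been mapped, i.e. the case the WARN_ON_ONCE() above handles.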