@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -539,6 +540,42 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
unsigned long offset = pd_index * sizeof(*pd);
+ bool unmap = false;
+
+ /*
+ * XXX Move this outside of this function. Perhaps add a struct
+ * iommu_domain parameter to ->{get,put}_resv_regions() so that
+ * the mapping can be done there.
+ *
+ * The problem here is that as->smmu is only known once we attach
+ * the domain to a device (because then we look up the right SMMU
+ * instance via dev_iommu_priv_get()). When the direct mappings
+ * are created for reserved regions, the domain has not been
+ * attached to a device yet, so we don't know which SMMU instance
+ * to use. We currently fix that up in ->apply_resv_region()
+ * because that is the first time we have access to a struct
+ * device that will be used with the IOMMU domain. However, that's
+ * asymmetric and doesn't take care of the page directory mapping
+ * either, so we need to come up with something better.
+ */
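+ /*
+ * Transiently map the page directory so that the PTC and TLB
+ * flushes below can use a valid DMA address.
+ */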
+ if (as->pd_dma == 0) {
+ as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, as->pd_dma))
+ return;
+
+ if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ return;
+ }
+
+ unmap = true;
+ }

/* Set the page directory entry first */
pd[pd_index] = value;
@@ -551,6 +588,13 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush_ptc(smmu, as->pd_dma, offset);
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
+
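+ /* drop the transient page directory mapping created above */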
+ if (unmap) {
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ as->pd_dma = 0;
+ }
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
@@ -945,6 +989,41 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev)
return group->group;
}

+static void tegra_smmu_apply_resv_region(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_resv_region *region)
+{
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+
+ /*
+ * ->attach_dev() may not have been called yet at this point, so the
+ * address space may not have been associated with an SMMU instance.
+ * Set up the association here to make sure subsequent code can rely
+ * on the SMMU instance being known.
+ *
+ * Also make sure that the new SMMU instance doesn't conflict with
+ * one already associated with the address space. This can happen
+ * if a domain is shared between multiple devices.
+ *
+ * Note that this is purely theoretical because there are no known
+ * SoCs with multiple instances of this SMMU.
+ *
+ * XXX Deal with this elsewhere. One possibility would be to pass the
+ * struct iommu_domain that we're operating on to ->get_resv_regions()
+ * and ->put_resv_regions() so that the connection between it and the
+ * struct device (in order to find the SMMU instance) can already be
+ * established at that time. This would be nicely symmetric because
+ * ->put_resv_regions() could undo it again so that ->attach_dev()
+ * could start from a clean slate.
+ */
+ if (WARN(as->smmu && as->smmu != smmu,
+ "conflicting SMMU instances\n"))
+ return;
+
+ as->smmu = smmu;
+}
+
static int tegra_smmu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -978,6 +1057,10 @@ static const struct iommu_ops tegra_smmu_ops = {
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
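+ /* reserved regions are discovered via the device tree */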
+ .get_resv_regions = of_iommu_get_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
+ .apply_resv_region = tegra_smmu_apply_resv_region,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
};
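
For reference, the ordering problem described in the comments above follows from how the IOMMU core drives these callbacks. Below is a condensed sketch of iommu_group_create_direct_mappings() from drivers/iommu/iommu.c as of this series; it is not part of this patch, and the page-size alignment, the skip of already-mapped pages, the IOTLB flush and most error handling are omitted:

	static int iommu_group_create_direct_mappings(struct iommu_group *group,
						      struct device *dev)
	{
		struct iommu_domain *domain = group->default_domain;
		struct iommu_resv_region *entry;
		struct list_head mappings;
		int ret = 0;

		INIT_LIST_HEAD(&mappings);

		/* for this driver, this ends up in of_iommu_get_resv_regions() */
		iommu_get_resv_regions(dev, &mappings);

		list_for_each_entry(entry, &mappings, list) {
			/*
			 * This runs before ->attach_dev(), which is why
			 * tegra_smmu_apply_resv_region() has to establish
			 * the as->smmu association itself.
			 */
			if (domain->ops->apply_resv_region)
				domain->ops->apply_resv_region(dev, domain, entry);

			if (entry->type != IOMMU_RESV_DIRECT)
				continue;

			/* identity-map the reserved region */
			ret = iommu_map(domain, entry->start, entry->start,
					entry->length, entry->prot);
			if (ret)
				break;
		}

		iommu_put_resv_regions(dev, &mappings);
		return ret;
	}

The direct mappings are thus created against the group's default domain before tegra_smmu_attach_dev() has ever run, which is exactly the window that both the as->smmu fix-up in tegra_smmu_apply_resv_region() and the transient page directory mapping in tegra_smmu_set_pde() are papering over.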