@@ -1161,7 +1161,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1254,7 +1254,7 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
@@ -1274,7 +1274,7 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}
@@ -1293,7 +1293,7 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
@@ -1305,7 +1305,7 @@ static dma_addr_t __arm_iommu_map_page_at(struct device *dev, struct page *page,
dma_addr_t dma_addr;
int ret, len = PAGE_ALIGN(size + offset);
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_cpu_to_dev(page, offset, size, dir);
dma_addr = __alloc_iova_at(mapping, req, len);
@@ -1349,7 +1349,7 @@ dma_addr_t arm_iommu_map_page_at(struct device *dev, struct page *page,
unsigned int phys;
int ret;
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_cpu_to_dev(page, offset, size, dir);
/* Check if iova area is reserved in advance. */
@@ -1386,7 +1386,7 @@ static void __arm_iommu_unmap_page_at(struct device *dev, dma_addr_t handle,
if (!iova)
return;
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_dev_to_cpu(page, offset, size, dir);
iommu_unmap(mapping->domain, iova, len);
@@ -1430,7 +1430,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
if (!iova)
return;
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && (dir != DMA_NONE))
__dma_page_dev_to_cpu(page, offset, size, dir);
}
@@ -56,7 +56,7 @@ static void map_iovmm_area(struct nvmap_handle *h)
BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
iova = dma_map_page_at(to_iovmm_dev(h), h->pgalloc.pages[i],
- va, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ va, 0, PAGE_SIZE, DMA_NONE);
BUG_ON(iova != va);
}
h->pgalloc.dirty = false;
@@ -504,7 +504,7 @@ void nvmap_free_vm(struct device *dev, struct tegra_iovmm_area *area)
dma_addr_t iova;
iova = area->iovm_start + i * PAGE_SIZE;
- dma_unmap_page(dev, iova, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, iova, PAGE_SIZE, DMA_NONE);
}
kfree(area);
}
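
The hunks above all follow one pattern: the ARM IOMMU mapping paths still program the IOMMU (iommu_map/iommu_unmap) for every request, but the CPU cache maintenance calls (__dma_page_cpu_to_dev/__dma_page_dev_to_cpu) are now skipped when the caller passes DMA_NONE, and nvmap switches its IOVMM map/unmap calls from DMA_BIDIRECTIONAL to DMA_NONE, presumably because it performs its own cache maintenance on those pages. The standalone C model below is only an illustration of that decision, not kernel code; arch_is_coherent_model and needs_cache_maintenance are invented names for the sketch.

#include <stdbool.h>
#include <stdio.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

/* Assumption for the model: a non-coherent ARM SoC, i.e. arch_is_coherent() is false. */
static const bool arch_is_coherent_model = false;

/* Mirrors the guard added throughout the dma-mapping.c hunks above. */
static bool needs_cache_maintenance(enum dma_data_direction dir)
{
	return !arch_is_coherent_model && dir != DMA_NONE;
}

int main(void)
{
	/* Ordinary streaming mapping: the page is cleaned/invalidated for the device. */
	printf("DMA_TO_DEVICE -> maintenance: %d\n", needs_cache_maintenance(DMA_TO_DEVICE));
	/* nvmap-style mapping: the IOMMU entry is created, CPU caches are left alone. */
	printf("DMA_NONE      -> maintenance: %d\n", needs_cache_maintenance(DMA_NONE));
	return 0;
}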
@@ -55,9 +55,19 @@ struct dma_map_ops {
static inline int valid_dma_direction(int dma_direction)
{
- return ((dma_direction == DMA_BIDIRECTIONAL) ||
- (dma_direction == DMA_TO_DEVICE) ||
- (dma_direction == DMA_FROM_DEVICE));
+ int ret = 1;
+
+ switch (dma_direction) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
}
static inline int is_device_dma_capable(struct device *dev)
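
With DMA_NONE added to valid_dma_direction(), the DMA API's sanity checks (for example the BUG_ON(!valid_dma_direction(dir)) checks in the generic dma_map_* helpers) no longer reject DMA_NONE mappings; only out-of-range direction values fail. The snippet below is a minimal standalone model of the updated check, assuming the usual four enum values; valid_dma_direction_model is an invented name for the sketch.

#include <stdio.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

/* Model of the updated check: the four named directions are valid, anything else is not. */
static int valid_dma_direction_model(int dma_direction)
{
	switch (dma_direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
	case DMA_FROM_DEVICE:
	case DMA_NONE:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("DMA_NONE valid: %d\n", valid_dma_direction_model(DMA_NONE)); /* 1 after this patch */
	printf("42 valid:       %d\n", valid_dma_direction_model(42));       /* 0: out of range */
	return 0;
}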