@@ -164,6 +164,9 @@ struct stm32_dcmi {
int errors_count;
int overrun_count;
int buffers_count;
+
+ /* Ensure atomicity of DMA operations */
+ struct mutex dma_lock;
};
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
@@ -314,6 +317,13 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
return ret;
}
+ /*
+ * Prevent a call to dmaengine_terminate_all() between
+ * dmaengine_prep_slave_single() and dmaengine_submit()
+ * by locking the whole DMA submission sequence.
+ */
+ mutex_lock(&dcmi->dma_lock);
+
/* Prepare a DMA transaction */
desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
buf->size,
@@ -322,6 +332,7 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
if (!desc) {
dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
__func__, &buf->paddr, buf->size);
+ mutex_unlock(&dcmi->dma_lock);
return -EINVAL;
}
@@ -333,9 +344,12 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
dcmi->dma_cookie = dmaengine_submit(desc);
if (dma_submit_error(dcmi->dma_cookie)) {
dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
+ mutex_unlock(&dcmi->dma_lock);
return -ENXIO;
}
+ mutex_unlock(&dcmi->dma_lock);
+
dma_async_issue_pending(dcmi->dma_chan);
return 0;
@@ -717,7 +731,9 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
spin_unlock_irq(&dcmi->irqlock);
/* Stop all pending DMA operations */
+ mutex_lock(&dcmi->dma_lock);
dmaengine_terminate_all(dcmi->dma_chan);
+ mutex_unlock(&dcmi->dma_lock);
pm_runtime_put(dcmi->dev);
@@ -1719,6 +1735,7 @@ static int dcmi_probe(struct platform_device *pdev)
spin_lock_init(&dcmi->irqlock);
mutex_init(&dcmi->lock);
+ mutex_init(&dcmi->dma_lock);
init_completion(&dcmi->complete);
INIT_LIST_HEAD(&dcmi->buffers);
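
The race this locking closes: a dmaengine driver may reclaim a descriptor that has been prepared but not yet submitted when dmaengine_terminate_all() runs, so a terminate landing between dmaengine_prep_slave_single() and dmaengine_submit() would leave dcmi_start_dma() submitting a stale descriptor. Below is a minimal user-space sketch of the same pattern, with a pthread mutex standing in for dma_lock; struct dma_desc and the prep/submit/terminate helpers are hypothetical stand-ins for the dmaengine API, not the kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the dmaengine calls; plain user-space C. */
struct dma_desc { bool submitted; };

static pthread_mutex_t dma_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dma_desc *pending; /* prepared but not yet submitted */

static struct dma_desc *prep_slave_single(void)
{
	struct dma_desc *d = calloc(1, sizeof(*d));
	pending = d; /* the "engine" now tracks this descriptor */
	return d;
}

static int submit(struct dma_desc *d)
{
	if (!d || d != pending) /* descriptor was reclaimed underneath us */
		return -1;
	d->submitted = true;
	return 0;
}

static void terminate_all(void)
{
	free(pending); /* reclaims any prepared or submitted descriptor */
	pending = NULL;
}

/* Submission path, mirroring dcmi_start_dma() after the patch. */
static int start_dma(void)
{
	struct dma_desc *d;
	int ret;

	pthread_mutex_lock(&dma_lock);
	d = prep_slave_single();
	ret = submit(d); /* terminate_all() cannot run between prep and here */
	pthread_mutex_unlock(&dma_lock);
	return ret;
}

/* Teardown path, mirroring dcmi_stop_streaming() after the patch. */
static void stop_streaming(void)
{
	pthread_mutex_lock(&dma_lock);
	terminate_all();
	pthread_mutex_unlock(&dma_lock);
}

int main(void)
{
	if (start_dma() == 0)
		printf("descriptor submitted under dma_lock\n");
	stop_streaming();
	return 0;
}

Holding dma_lock across both paths guarantees that terminate_all() observes either no prepared descriptor or a fully submitted one, never the half-built state in between.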