Perform the dmaengine_prep_slave_sg() call within buf_prepare and mark
the descriptor as reusable in order to avoid having to re-prepare it at
every start of the DMA.
This also allows removing the mutex used by the driver to protect the
DMA descriptor handling code.
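
For reference, the reusable-descriptor flow relied upon here boils down
to the sketch below (chan, sgt, cb and ctx are placeholders, error
handling omitted):

    /* once, in buf_prepare: prepare the descriptor and mark it reusable */
    desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
                                   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    desc->callback = cb;          /* completion callback */
    desc->callback_param = ctx;   /* callback context */
    dmaengine_desc_set_reuse(desc);

    /* at every start of DMA: submit the already prepared descriptor */
    cookie = dmaengine_submit(desc);
    dma_async_issue_pending(chan);

    /* in buf_cleanup: release the reusable descriptor */
    dmaengine_desc_free(desc);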

Signed-off-by: Alain Volmat <[email protected]>
---
 drivers/media/platform/st/stm32/stm32-dcmi.c | 78 ++++++++++++++++------------
 1 file changed, 45 insertions(+), 33 deletions(-)

diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index 27b283474096..d8ef06bd7506 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -113,6 +113,7 @@ struct dcmi_buf {
        struct vb2_v4l2_buffer  vb;
        bool                    prepared;
        struct sg_table         sgt;
+       struct dma_async_tx_descriptor *dma_desc;
        size_t                  size;
        struct list_head        list;
 };
@@ -163,9 +164,6 @@ struct stm32_dcmi {
        int                             overrun_count;
        int                             buffers_count;
 
-       /* Ensure DMA operations atomicity */
-       struct mutex                    dma_lock;
-
        struct media_device             mdev;
        struct media_pad                vid_cap_pad;
        struct media_pipeline           pipeline;
@@ -300,39 +298,13 @@ static void dcmi_dma_callback(void *param)
 static int dcmi_start_dma(struct stm32_dcmi *dcmi,
                          struct dcmi_buf *buf)
 {
-       struct dma_async_tx_descriptor *desc = NULL;
-
-       /*
-        * Avoid call of dmaengine_terminate_sync() between
-        * dmaengine_prep_slave_single() and dmaengine_submit()
-        * by locking the whole DMA submission sequence
-        */
-       mutex_lock(&dcmi->dma_lock);
-
-       /* Prepare a DMA transaction */
-       desc = dmaengine_prep_slave_sg(dcmi->dma_chan, buf->sgt.sgl, buf->sgt.nents,
-                                      DMA_DEV_TO_MEM,
-                                      DMA_PREP_INTERRUPT);
-       if (!desc) {
-               dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_sg failed\n", __func__);
-               mutex_unlock(&dcmi->dma_lock);
-               return -EINVAL;
-       }
-
-       /* Set completion callback routine for notification */
-       desc->callback = dcmi_dma_callback;
-       desc->callback_param = dcmi;
-
        /* Push current DMA transaction in the pending queue */
-       dcmi->dma_cookie = dmaengine_submit(desc);
+       dcmi->dma_cookie = dmaengine_submit(buf->dma_desc);
        if (dma_submit_error(dcmi->dma_cookie)) {
                dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
-               mutex_unlock(&dcmi->dma_lock);
                return -ENXIO;
        }
 
-       mutex_unlock(&dcmi->dma_lock);
-
        dma_async_issue_pending(dcmi->dma_chan);
 
        return 0;
@@ -547,6 +519,31 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
                        size -= bytes;
                }
 
+               /* Prepare a DMA transaction */
+               buf->dma_desc = dmaengine_prep_slave_sg(dcmi->dma_chan,
+                                                       buf->sgt.sgl,
+                                                       buf->sgt.nents,
+                                                       DMA_DEV_TO_MEM,
+                                                       DMA_PREP_INTERRUPT);
+               if (!buf->dma_desc) {
+                       dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_sg failed\n", __func__);
+                       sg_free_table(&buf->sgt);
+                       return -EIO;
+               }
+
+               /* Set completion callback routine for notification */
+               buf->dma_desc->callback = dcmi_dma_callback;
+               buf->dma_desc->callback_param = dcmi;
+
+               /* Mark the descriptor as reusable to avoid having to prepare it */
+               ret = dmaengine_desc_set_reuse(buf->dma_desc);
+               if (ret) {
+                       dev_err(dcmi->dev, "%s: DMA dmaengine_desc_set_reuse failed\n", __func__);
+                       dmaengine_desc_free(buf->dma_desc);
+                       sg_free_table(&buf->sgt);
+                       return -EIO;
+               }
+
                buf->prepared = true;
 
                vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
@@ -555,6 +552,23 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
        return 0;
 }
 
+static void dcmi_buf_cleanup(struct vb2_buffer *vb)
+{
+       struct stm32_dcmi *dcmi =  vb2_get_drv_priv(vb->vb2_queue);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+       int ret;
+
+       if (!buf->prepared)
+               return;
+
+       ret = dmaengine_desc_free(buf->dma_desc);
+       if (ret)
+               dev_err(dcmi->dev, "%s: Failed to free the DMA descriptor (0x%x)\n",
+                       __func__, ret);
+       sg_free_table(&buf->sgt);
+}
+
 static void dcmi_buf_queue(struct vb2_buffer *vb)
 {
        struct stm32_dcmi *dcmi =  vb2_get_drv_priv(vb->vb2_queue);
@@ -859,9 +873,7 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
        spin_unlock_irq(&dcmi->irqlock);
 
        /* Stop all pending DMA operations */
-       mutex_lock(&dcmi->dma_lock);
        dmaengine_terminate_sync(dcmi->dma_chan);
-       mutex_unlock(&dcmi->dma_lock);
 
        pm_runtime_put(dcmi->dev);
 
@@ -878,6 +890,7 @@ static const struct vb2_ops dcmi_video_qops = {
        .queue_setup            = dcmi_queue_setup,
        .buf_init               = dcmi_buf_init,
        .buf_prepare            = dcmi_buf_prepare,
+       .buf_cleanup            = dcmi_buf_cleanup,
        .buf_queue              = dcmi_buf_queue,
        .start_streaming        = dcmi_start_streaming,
        .stop_streaming         = dcmi_stop_streaming,
@@ -1953,7 +1966,6 @@ static int dcmi_probe(struct platform_device *pdev)
 
        spin_lock_init(&dcmi->irqlock);
        mutex_init(&dcmi->lock);
-       mutex_init(&dcmi->dma_lock);
        init_completion(&dcmi->complete);
        INIT_LIST_HEAD(&dcmi->buffers);
 

-- 
2.34.1
