With cyclic mode, the shared virt-dma logic doesn't actually
manage the descriptor state, nor the calling of the descriptor
free callback. This results in leaking a desc structure every
time we start an audio transfer.

Thus we must manage it ourselves. The k3dma driver already keeps
track of the active and finished descriptors via ds_run and ds_done
pointers, so when we tear down everything in terminate_all, call
free_desc on the ds_run and ds_done pointers if they are not null.

NOTE: HiKey doesn't use the non-cyclic dma modes, so I haven't been
able to test those modes. But with this patch we no longer leak
the desc structures.

Cc: Zhangfei Gao <zhangfei....@linaro.org>
Cc: Jingoo Han <jg1....@samsung.com>
Cc: Krzysztof Kozlowski <k.kozlow...@samsung.com>
Cc: Maxime Ripard <maxime.rip...@free-electrons.com>
Cc: Vinod Koul <vinod.k...@intel.com>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Mark Brown <broo...@kernel.org>
Cc: Andy Green <a...@warmcat.com>
Signed-off-by: John Stultz <john.stu...@linaro.org>
---
 drivers/dma/k3dma.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 8e4c845..950ed36 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -219,6 +219,7 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
                                spin_lock_irqsave(&c->vc.lock, flags);
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
+                               p->ds_run = NULL;
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        if (c && (tc2 & BIT(i))) {
@@ -266,14 +267,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);
+
+               WARN_ON_ONCE(c->phy->ds_run);
+               WARN_ON_ONCE(c->phy->ds_done);
                c->phy->ds_run = ds;
-               c->phy->ds_done = NULL;
                /* start dma */
                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                return 0;
        }
-       c->phy->ds_done = NULL;
-       c->phy->ds_run = NULL;
        return -EAGAIN;
 }
 
@@ -659,6 +660,15 @@ static int k3_dma_config(struct dma_chan *chan,
        return 0;
 }
 
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+       struct k3_dma_desc_sw *ds =
+               container_of(vd, struct k3_dma_desc_sw, vd);
+
+       kfree(ds);
+}
+
+
 static int k3_dma_terminate_all(struct dma_chan *chan)
 {
        struct k3_dma_chan *c = to_k3_chan(chan);
@@ -682,7 +692,15 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
                k3_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
-               p->ds_run = p->ds_done = NULL;
+               if (p->ds_run) {
+                       k3_dma_free_desc(&p->ds_run->vd);
+                       p->ds_run = NULL;
+               }
+               if (p->ds_done) {
+                       k3_dma_free_desc(&p->ds_done->vd);
+                       p->ds_done = NULL;
+               }
+
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);
@@ -735,14 +753,6 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
        return 0;
 }
 
-static void k3_dma_free_desc(struct virt_dma_desc *vd)
-{
-       struct k3_dma_desc_sw *ds =
-               container_of(vd, struct k3_dma_desc_sw, vd);
-
-       kfree(ds);
-}
-
 static const struct of_device_id k3_pdma_dt_ids[] = {
        { .compatible = "hisilicon,k3-dma-1.0", },
        {}
-- 
1.9.1

Reply via email to