Add dma_src, dma_dst and dma_len fields to struct dma_async_tx_descriptor
for storing DMA mapping data, and convert the core DMA engine memcpy
helpers (dma_async_memcpy_buf_to_buf(), dma_async_memcpy_buf_to_pg() and
dma_async_memcpy_pg_to_pg()) to do the DMA unmapping themselves from the
descriptor's ->callback. The helpers now pass DMA_COMPL_SKIP_SRC_UNMAP
and DMA_COMPL_SKIP_DEST_UNMAP so the DMA driver no longer unmaps the
buffers on completion.
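
The resulting flow, roughly (a sketch, not part of the patch; the
submit-side lines are taken from the converted helpers, while
foo_dma_complete() and struct foo_desc are made-up stand-ins for a
typical dmaengine driver's completion path):

	/* submit side (core): record the mappings on the descriptor */
	tx->dma_src = dma_src;
	tx->dma_dst = dma_dest;
	tx->dma_len = len;
	tx->callback = dma_async_memcpy_buf_to_buf_cb;
	tx->callback_param = tx;

	/*
	 * completion side (driver, hypothetical): invoking ->callback
	 * now runs the core's unmap callback, so the driver itself
	 * skips unmapping (DMA_COMPL_SKIP_{SRC,DEST}_UNMAP are set).
	 */
	static void foo_dma_complete(struct foo_desc *d)
	{
		struct dma_async_tx_descriptor *tx = &d->txd;

		if (tx->callback)
			tx->callback(tx->callback_param);
	}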

Cc: Vinod Koul <vinod.k...@intel.com>
Cc: Tomasz Figa <t.f...@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnier...@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.p...@samsung.com>
---
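Reviewer note (below the '---', so it won't land in the changelog):
the change is invisible to users of these helpers; mapping and
unmapping remain internal. A minimal caller sketch, assuming a
memcpy-capable channel in 'chan':

	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0)
		return -ENOMEM;	/* could not prepare the descriptor */
	dma_sync_wait(chan, cookie);	/* unmap happens via ->callback */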
 drivers/dma/dmaengine.c   | 62 +++++++++++++++++++++++++++++++++++++++++------
 include/linux/dmaengine.h |  6 +++++
 2 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44..1b9c02a 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -853,6 +853,15 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
+static void dma_async_memcpy_buf_to_buf_cb(void *dma_async_param)
+{
+       struct dma_async_tx_descriptor *tx = dma_async_param;
+       struct dma_device *dev = tx->chan->device;
+
+       dma_unmap_single(dev->dev, tx->dma_src, tx->dma_len, DMA_TO_DEVICE);
+       dma_unmap_single(dev->dev, tx->dma_dst, tx->dma_len, DMA_FROM_DEVICE);
+}
+
 /**
  * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
  * @chan: DMA channel to offload copy to
@@ -877,9 +886,8 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-       flags = DMA_CTRL_ACK |
-               DMA_COMPL_SRC_UNMAP_SINGLE |
-               DMA_COMPL_DEST_UNMAP_SINGLE;
+       flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
+               DMA_COMPL_SKIP_DEST_UNMAP;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
        if (!tx) {
@@ -888,7 +896,13 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                return -ENOMEM;
        }
 
-       tx->callback = NULL;
+       tx->dma_src = dma_src;
+       tx->dma_dst = dma_dest;
+       tx->dma_len = len;
+
+       tx->callback = dma_async_memcpy_buf_to_buf_cb;
+       tx->callback_param = tx;
+
        cookie = tx->tx_submit(tx);
 
        preempt_disable();
@@ -900,6 +914,15 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 }
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
 
+static void dma_async_memcpy_buf_to_pg_cb(void *dma_async_param)
+{
+       struct dma_async_tx_descriptor *tx = dma_async_param;
+       struct dma_device *dev = tx->chan->device;
+
+       dma_unmap_single(dev->dev, tx->dma_src, tx->dma_len, DMA_TO_DEVICE);
+       dma_unmap_page(dev->dev, tx->dma_dst, tx->dma_len, DMA_FROM_DEVICE);
+}
+
 /**
  * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
  * @chan: DMA channel to offload copy to
@@ -925,7 +948,8 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 
        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-       flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
+       flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
+               DMA_COMPL_SKIP_DEST_UNMAP;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
        if (!tx) {
@@ -934,7 +958,13 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                return -ENOMEM;
        }
 
-       tx->callback = NULL;
+       tx->dma_src = dma_src;
+       tx->dma_dst = dma_dest;
+       tx->dma_len = len;
+
+       tx->callback = dma_async_memcpy_buf_to_pg_cb;
+       tx->callback_param = tx;
+
        cookie = tx->tx_submit(tx);
 
        preempt_disable();
@@ -946,6 +976,15 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 }
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 
+static void dma_async_memcpy_pg_to_pg_cb(void *dma_async_param)
+{
+       struct dma_async_tx_descriptor *tx = dma_async_param;
+       struct dma_device *dev = tx->chan->device;
+
+       dma_unmap_page(dev->dev, tx->dma_src, tx->dma_len, DMA_TO_DEVICE);
+       dma_unmap_page(dev->dev, tx->dma_dst, tx->dma_len, DMA_FROM_DEVICE);
+}
+
 /**
  * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
@@ -974,7 +1013,8 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
-       flags = DMA_CTRL_ACK;
+       flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
+               DMA_COMPL_SKIP_DEST_UNMAP;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
        if (!tx) {
@@ -983,7 +1023,13 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
                return -ENOMEM;
        }
 
-       tx->callback = NULL;
+       tx->dma_src = dma_src;
+       tx->dma_dst = dma_dest;
+       tx->dma_len = len;
+
+       tx->callback = dma_async_memcpy_pg_to_pg_cb;
+       tx->callback_param = tx;
+
        cookie = tx->tx_submit(tx);
 
        preempt_disable();
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d3201e4..8741d57 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -402,6 +402,9 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * @phys: physical address of the descriptor
  * @chan: target channel for this operation
  * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+ * @dma_src: DMA source address (needed for DMA unmap)
+ * @dma_dst: DMA destination address (needed for DMA unmap)
+ * @dma_len: DMA length (needed for DMA unmap)
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
@@ -415,6 +418,9 @@ struct dma_async_tx_descriptor {
        dma_addr_t phys;
        struct dma_chan *chan;
        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+       dma_addr_t dma_src;
+       dma_addr_t dma_dst;
+       size_t dma_len;
        dma_async_tx_callback callback;
        void *callback_param;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
-- 
1.8.0
