Convert core async_tx code (async_syndrome_val()) to do
DMA unmapping itself using the ->callback functionality.

Cc: Dan Williams <d...@fb.com>
Cc: Tomasz Figa <t.f...@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnier...@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.p...@samsung.com>
---
 crypto/async_tx/async_pq.c | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 2848fe8..9e5500e 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -292,6 +292,26 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
                                     disks, len);
 }
 
+static void async_syndrome_val_cb(void *dma_async_param)
+{
+       struct dma_async_tx_descriptor *tx = dma_async_param;
+       struct dma_device *dev = tx->chan->device;
+       int i;
+
+       for (i = tx->dma_src_cnt; i < tx->dma_src_cnt + 2; i++) {
+               if (tx->dma_src[i])
+                       dma_unmap_page(dev->dev, tx->dma_src[i], tx->dma_len,
+                                      DMA_TO_DEVICE);
+       }
+
+       for (i = 0; i < tx->dma_src_cnt; i++)
+               dma_unmap_page(dev->dev, tx->dma_src[i], tx->dma_len,
+                              DMA_TO_DEVICE);
+
+       if (tx->orig_callback)
+               tx->orig_callback(tx->orig_callback_param);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -335,15 +355,17 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 
                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);
-               if (!P(blocks, disks))
+               if (!P(blocks, disks)) {
+                       pq[0] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
-               else
+               } else
                        pq[0] = dma_map_page(dev, P(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
-               if (!Q(blocks, disks))
+               if (!Q(blocks, disks)) {
+                       pq[1] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-               else
+               } else
                        pq[1] = dma_map_page(dev, Q(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
@@ -370,7 +392,15 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }
-               async_tx_submit(chan, tx, submit);
+
+               tx->dma_src[src_cnt] = pq[0];
+               tx->dma_src[src_cnt + 1] = pq[1];
+               for (i = 0; i < src_cnt; i++)
+                       tx->dma_src[i] = dma_src[i];
+               tx->dma_src_cnt = src_cnt;
+               tx->dma_len = len;
+
+               __async_tx_submit(chan, tx, async_syndrome_val_cb, tx, submit);
 
                return tx;
        } else {
-- 
1.8.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to