DMA_TO_DEVICE synchronisation must be done after the last modification
of the memory region by software and before the region is handed off to
the device.
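
For reference, a minimal sketch of the required ordering (my_desc,
fill_desc() and the calling context are hypothetical, not code from
this driver):

	/* my_desc and fill_desc() are illustrative only */
	struct my_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	dma_addr_t handle;

	if (!desc)
		return -ENOMEM;

	fill_desc(desc);		/* last CPU write to the region */

	handle = dma_map_single(dev, desc, sizeof(*desc), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(desc);
		return -ENOMEM;
	}

	/* only now may 'handle' be handed to the device */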

Signed-off-by: Hui Tang <tanghu...@huawei.com>
---
 drivers/crypto/qat/qat_common/qat_algs.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index ff78c73..e88c534 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -731,10 +731,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(!bufl))
                return -ENOMEM;
 
-       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, blp)))
-               goto err_in;
-
        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;
 
@@ -750,6 +746,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+               goto err_in;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
@@ -764,9 +763,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
-               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, bloutp)))
-                       goto err_out;
                bufers = buflout->bufers;
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;
@@ -784,6 +780,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
+               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+                       goto err_out;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
@@ -795,17 +794,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        return 0;
 
 err_out:
+       if (!dma_mapping_error(dev, bloutp))
+               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
-       if (!dma_mapping_error(dev, bloutp))
-               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);
 
 err_in:
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +816,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
 
-       if (!dma_mapping_error(dev, blp))
-               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);
 
        dev_err(dev, "Failed to map buf for dma\n");
-- 
2.8.1
