[PATCH 4.15 13/55] crypto: inside-secure - avoid unmapping DMA memory that was not mapped

2018-02-02 Thread Greg Kroah-Hartman
4.15-stable review patch.  If anyone has any objections, please let me know.

--

From: Antoine Tenart 

commit c957f8b3e2e54b29f53ef69decc87bbc858c9b58 upstream.

This patch adds a parameter in the SafeXcel ahash request structure to
keep track of the number of SG entries mapped. This allows not to call
dma_unmap_sg() when dma_map_sg() wasn't called in the first place. This
also removes a warning when the debugging of the DMA-API is enabled in
the kernel configuration: "DMA-API: device driver tries to free DMA
memory it has not allocated".

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart 
Signed-off-by: Herbert Xu 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/crypto/inside-secure/safexcel_hash.c |   20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -34,6 +34,8 @@ struct safexcel_ahash_req {
bool hmac;
bool needs_inv;
 
+   int nents;
+
u8 state_sz;/* expected sate size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
@@ -152,8 +154,10 @@ static int safexcel_handle_req_result(st
memcpy(areq->result, sreq->state,
   crypto_ahash_digestsize(ahash));
 
-   dma_unmap_sg(priv->dev, areq->src,
-sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+   if (sreq->nents) {
+   dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+   sreq->nents = 0;
+   }
 
safexcel_free_context(priv, async, sreq->state_sz);
 
@@ -178,7 +182,7 @@ static int safexcel_ahash_send_req(struc
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
-   int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+   int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
 
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -248,15 +252,15 @@ static int safexcel_ahash_send_req(struc
}
 
/* Now handle the current ahash request buffer(s) */
-   nents = dma_map_sg(priv->dev, areq->src,
-  sg_nents_for_len(areq->src, areq->nbytes),
-  DMA_TO_DEVICE);
-   if (!nents) {
+   req->nents = dma_map_sg(priv->dev, areq->src,
+   sg_nents_for_len(areq->src, areq->nbytes),
+   DMA_TO_DEVICE);
+   if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
 
-   for_each_sg(areq->src, sg, nents, i) {
+   for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
 
/* Do not overflow the request */




[PATCH 4.15 13/55] crypto: inside-secure - avoid unmapping DMA memory that was not mapped

2018-02-02 Thread Greg Kroah-Hartman
4.15-stable review patch.  If anyone has any objections, please let me know.

--

From: Antoine Tenart 

commit c957f8b3e2e54b29f53ef69decc87bbc858c9b58 upstream.

This patch adds a parameter in the SafeXcel ahash request structure to
keep track of the number of SG entries mapped. This allows not to call
dma_unmap_sg() when dma_map_sg() wasn't called in the first place. This
also removes a warning when the debugging of the DMA-API is enabled in
the kernel configuration: "DMA-API: device driver tries to free DMA
memory it has not allocated".

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart 
Signed-off-by: Herbert Xu 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/crypto/inside-secure/safexcel_hash.c |   20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -34,6 +34,8 @@ struct safexcel_ahash_req {
bool hmac;
bool needs_inv;
 
+   int nents;
+
u8 state_sz;/* expected sate size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
@@ -152,8 +154,10 @@ static int safexcel_handle_req_result(st
memcpy(areq->result, sreq->state,
   crypto_ahash_digestsize(ahash));
 
-   dma_unmap_sg(priv->dev, areq->src,
-sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+   if (sreq->nents) {
+   dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+   sreq->nents = 0;
+   }
 
safexcel_free_context(priv, async, sreq->state_sz);
 
@@ -178,7 +182,7 @@ static int safexcel_ahash_send_req(struc
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
-   int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+   int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
 
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -248,15 +252,15 @@ static int safexcel_ahash_send_req(struc
}
 
/* Now handle the current ahash request buffer(s) */
-   nents = dma_map_sg(priv->dev, areq->src,
-  sg_nents_for_len(areq->src, areq->nbytes),
-  DMA_TO_DEVICE);
-   if (!nents) {
+   req->nents = dma_map_sg(priv->dev, areq->src,
+   sg_nents_for_len(areq->src, areq->nbytes),
+   DMA_TO_DEVICE);
+   if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
 
-   for_each_sg(areq->src, sg, nents, i) {
+   for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
 
/* Do not overflow the request */