The Allwinner A10 and A13 SoCs have a version of the SS which produces an
invalid IV in the IVx register.

Instead of adding a variant for those, let's convert SS to produce IV
directly from data.
Fixes: 6298e948215f2 ("crypto: sunxi-ss - Add Allwinner Security System crypto accelerator")
Cc: <stable@vger.kernel.org>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
---
 .../allwinner/sun4i-ss/sun4i-ss-cipher.c      | 34 +++++++++++++++----
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 4dd736ee5a4d..53478c3feca6 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -20,6 +20,7 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        u32 mode = ctx->mode;
+       void *backup_iv = NULL;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
@@ -42,6 +43,13 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
                return -EINVAL;
        }
 
+       if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+               backup_iv = kzalloc(ivsize, GFP_KERNEL);
+               if (!backup_iv)
+                       return -ENOMEM;
+               scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+       }
+
        spin_lock_irqsave(&ss->slock, flags);
 
        for (i = 0; i < op->keylen; i += 4)
@@ -102,9 +110,12 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
        } while (oleft);
 
        if (areq->iv) {
-               for (i = 0; i < 4 && i < ivsize / 4; i++) {
-                       v = readl(ss->base + SS_IV0 + i * 4);
-                       *(u32 *)(areq->iv + i * 4) = v;
+               if (mode & SS_DECRYPTION) {
+                       memcpy(areq->iv, backup_iv, ivsize);
+                       kfree_sensitive(backup_iv);
+               } else {
+                       scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+                                                ivsize, 0);
                }
        }
 
@@ -161,6 +172,7 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
+       void *backup_iv = NULL;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo;    /* offset for in and out */
        unsigned int ob = 0;    /* offset in buf */
@@ -202,6 +214,13 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
        if (need_fallback)
                return sun4i_ss_cipher_poll_fallback(areq);
 
+       if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+               backup_iv = kzalloc(ivsize, GFP_KERNEL);
+               if (!backup_iv)
+                       return -ENOMEM;
+               scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+       }
+
        spin_lock_irqsave(&ss->slock, flags);
 
        for (i = 0; i < op->keylen; i += 4)
@@ -322,9 +341,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
                }
        }
        if (areq->iv) {
-               for (i = 0; i < 4 && i < ivsize / 4; i++) {
-                       v = readl(ss->base + SS_IV0 + i * 4);
-                       *(u32 *)(areq->iv + i * 4) = v;
+               if (mode & SS_DECRYPTION) {
+                       memcpy(areq->iv, backup_iv, ivsize);
+                       kfree_sensitive(backup_iv);
+               } else {
+                       scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+                                                ivsize, 0);
                }
        }
 
-- 
2.26.2

Reply via email to