The rfc3686 implementation in chelsio essentially duplicates the
generic rfc3686 wrapper, so it can simply be removed to reduce
complexity.  Requests for rfc3686(ctr(aes)) will then be served by the
generic template instantiated over the best available ctr(aes),
including the one this driver still registers.
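
For context only (not part of the patch), a minimal sketch of how a
kernel user still obtains rfc3686(ctr(aes)) once the driver-specific
template is gone: the crypto API builds the generic rfc3686 template on
top of ctr(aes).  The function name rfc3686_demo and the all-zero key
are illustrative.

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/skcipher.h>
#include <linux/err.h>

static int rfc3686_demo(void)
{
	struct crypto_skcipher *tfm;
	/* AES-128 key followed by the 4-byte RFC 3686 nonce */
	static const u8 key[AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE];
	int err;

	/*
	 * With no driver providing rfc3686(ctr(aes)) directly, the API
	 * instantiates the generic rfc3686 template over ctr(aes).
	 */
	tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The template strips the trailing nonce and keys the inner ctr(aes) */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));

	crypto_free_skcipher(tfm);
	return err;
}

The generic template strips the 4-byte nonce from the key and manages
the 32-bit counter itself, which is the same logic being deleted from
the driver below.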

Signed-off-by: Herbert Xu <[email protected]>
---

 drivers/crypto/chelsio/chcr_algo.c |  109 +------------------------------------
 1 file changed, 4 insertions(+), 105 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 13b908ea48738..8374be72454db 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -856,9 +856,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(tfm) ==
-              CRYPTO_ALG_SUB_TYPE_CTR)) &&
-           (!(get_cryptoalg_subtype(tfm) ==
-              CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
+              CRYPTO_ALG_SUB_TYPE_CTR))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
@@ -988,42 +986,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
        return err;
 }
 
-static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
-                                  const u8 *key,
-                                  unsigned int keylen)
-{
-       struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
-       unsigned int ck_size, context_size;
-       u16 alignment = 0;
-       int err;
-
-       if (keylen < CTR_RFC3686_NONCE_SIZE)
-               return -EINVAL;
-       memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
-              CTR_RFC3686_NONCE_SIZE);
-
-       keylen -= CTR_RFC3686_NONCE_SIZE;
-       err = chcr_cipher_fallback_setkey(cipher, key, keylen);
-       if (err)
-               goto badkey_err;
-
-       ck_size = chcr_keyctx_ck_size(keylen);
-       alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
-       memcpy(ablkctx->key, key, keylen);
-       ablkctx->enckey_len = keylen;
-       context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
-                       keylen + alignment) >> 4;
-
-       ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
-                                               0, 0, context_size);
-       ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
-
-       return 0;
-badkey_err:
-       ablkctx->enckey_len = 0;
-
-       return err;
-}
 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
 {
        unsigned int size = AES_BLOCK_SIZE;
@@ -1107,10 +1069,6 @@ static int chcr_update_cipher_iv(struct skcipher_request *req,
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, (reqctx->processed /
                           AES_BLOCK_SIZE));
-       else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
-               *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
-                       CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
-                                               AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
@@ -1125,11 +1083,6 @@ static int chcr_update_cipher_iv(struct skcipher_request *req,
 
 }
 
-/* We need separate function for final iv because in rfc3686  Initial counter
- * starts from 1 and buffer size of iv is 8 byte only which remains constant
- * for subsequent update requests
- */
-
 static int chcr_final_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
@@ -1313,30 +1266,16 @@ static int process_cipher(struct skcipher_request *req,
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->iv, bytes);
        }
-       if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
-               memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
-               memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
-                               CTR_RFC3686_IV_SIZE);
-
-               /* initialize counter portion of counter block */
-               *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
-                       CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
-               memcpy(reqctx->init_iv, reqctx->iv, IV);
 
-       } else {
+       memcpy(reqctx->iv, req->iv, IV);
+       memcpy(reqctx->init_iv, req->iv, IV);
 
-               memcpy(reqctx->iv, req->iv, IV);
-               memcpy(reqctx->init_iv, req->iv, IV);
-       }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
 fallback:       atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
-                                          subtype ==
-                                          CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
-                                          reqctx->iv : req->iv,
-                                          op_type);
+                                          req->iv, op_type);
                goto error;
        }
        reqctx->op = op_type;
@@ -1486,27 +1425,6 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
        return chcr_device_init(ctx);
 }
 
-static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
-{
-       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
-       struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-
-       /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
-        * cannot be used as fallback in chcr_handle_cipher_response
-        */
-       ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
-                               CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(ablkctx->sw_cipher)) {
-               pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
-               return PTR_ERR(ablkctx->sw_cipher);
-       }
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
-                                   crypto_skcipher_reqsize(ablkctx->sw_cipher));
-       return chcr_device_init(ctx);
-}
-
-
 static void chcr_exit_tfm(struct crypto_skcipher *tfm)
 {
        struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
@@ -3894,25 +3812,6 @@ static struct chcr_alg_template driver_algs[] = {
                        .decrypt                = chcr_aes_decrypt,
                }
        },
-       {
-               .type = CRYPTO_ALG_TYPE_SKCIPHER |
-                       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
-               .is_registered = 0,
-               .alg.skcipher = {
-                       .base.cra_name          = "rfc3686(ctr(aes))",
-                       .base.cra_driver_name   = "rfc3686-ctr-aes-chcr",
-                       .base.cra_blocksize     = 1,
-
-                       .init                   = chcr_rfc3686_init,
-                       .exit                   = chcr_exit_tfm,
-                       .min_keysize            = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
-                       .max_keysize            = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
-                       .ivsize                 = CTR_RFC3686_IV_SIZE,
-                       .setkey                 = chcr_aes_rfc3686_setkey,
-                       .encrypt                = chcr_aes_encrypt,
-                       .decrypt                = chcr_aes_decrypt,
-               }
-       },
        /* SHA */
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
