crypto/nx842: Ignore queue overflow informative error

2015-12-05 Thread Haren Myneni

The NX842 coprocessor sets bit 3 if the queue overflows. This is purely
informational for the user, so the driver prints an informative message
and ignores the bit.

Signed-off-by: Haren Myneni 

diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 9f8402b..d1a2a2d 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -164,6 +164,7 @@ struct coprocessor_request_block {
 #define ICSWX_INITIATED (0x8)
 #define ICSWX_BUSY (0x4)
 #define ICSWX_REJECTED (0x2)
+#define ICSWX_BIT3 (0x1)   /* undefined or set from XERSO. */
 
 static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
 {
diff --git a/drivers/crypto/nx/nx-842-powernv.c 
b/drivers/crypto/nx/nx-842-powernv.c
index 9ef51fa..321b8e8 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -442,6 +442,15 @@ static int nx842_powernv_function(const unsigned char *in, 
unsigned int inlen,
 (unsigned int)ccw,
 (unsigned int)be32_to_cpu(crb->ccw));
 
+   /*
+* NX842 coprocessor uses 3rd bit to report queue overflow which is
+* not an error, just for information to user. So, ignore this bit.
+*/
+   if (ret & ICSWX_BIT3) {
+   pr_info_ratelimited("842 coprocessor queue overflow\n");
+   ret &= ~ICSWX_BIT3;
+   }
+
switch (ret) {
case ICSWX_INITIATED:
ret = wait_for_csb(wmem, csb);


--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [RFC PATCH] Crypto: rockchip/crypto - add hash support for crypto engine in rk3288

2015-12-05 Thread Corentin LABBE
Le 05/12/2015 07:30, Zain Wang a écrit :
> Add md5 sha1 sha256 support for crypto engine in rk3288.
> This patch can't support multiple updatings because of limited of IC,
> as result, it can't support import and export too.
> 
> Signed-off-by: Zain Wang 
> ---
>  drivers/crypto/rockchip/Makefile   |   1 +
>  drivers/crypto/rockchip/rk3288_crypto.c|  33 +-
>  drivers/crypto/rockchip/rk3288_crypto.h|  50 ++-
>  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c |  20 +-
>  drivers/crypto/rockchip/rk3288_crypto_ahash.c  | 369 
> +
>  5 files changed, 455 insertions(+), 18 deletions(-)
>  create mode 100644 drivers/crypto/rockchip/rk3288_crypto_ahash.c
> 
> diff --git a/drivers/crypto/rockchip/Makefile 
> b/drivers/crypto/rockchip/Makefile
> index 7051c6c..30f9129 100644
> +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
> @@ -0,0 +1,369 @@
> +/*
> + * Crypto acceleration support for Rockchip RK3288
> + *
> + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
> + *
> + * Author: Zain Wang 
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
> + */
> +#include "rk3288_crypto.h"
> +
> +static u8 *outdata[3] = {
> + "\xda\x39\xa3\xee\x5e\x6b\x4b\x0d\x32\x55"
> + "\xbf\xef\x95\x60\x18\x90\xaf\xd8\x07\x09",
> +
> + "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14"
> + "\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24"
> + "\x27\xae\x41\xe4\x64\x9b\x93\x4c"
> + "\xa4\x95\x99\x1b\x78\x52\xb8\x55",
> +
> + "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
> + "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
> +};
> +

Clearly this array must be made const, and a comment about what those
numbers are is necessary.
Perhaps splitting that into three arrays (const xxx_zero_message_hash = ...) is
also better.

> +static void nodata_process(struct ahash_request *req)
> +{
> + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
> + int rk_digest_size;
> +
> + rk_digest_size = crypto_ahash_digestsize(tfm);
> +
> + if (rk_digest_size == SHA1_DIGEST_SIZE)
> + memcpy(req->result, outdata[0], rk_digest_size);
> + else if (rk_digest_size == SHA256_DIGEST_SIZE)
> + memcpy(req->result, outdata[1], rk_digest_size);
> + else if (rk_digest_size == MD5_DIGEST_SIZE)
> + memcpy(req->result, outdata[2], rk_digest_size);
> +}
> +
> +static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
> +{
> + if (dev->ahash_req->base.complete)
> + dev->ahash_req->base.complete(&dev->ahash_req->base, err);
> +}
> +
> +static void rk_ahash_hw_init(struct rk_crypto_info *dev)
> +{
> + int reg_status = 0;
> +
> + reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
> +  RK_CRYPTO_HASH_FLUSH |
> +  _SBF(0xffff, 16);
> + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
> +
> + reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
> + reg_status &= (~RK_CRYPTO_HASH_FLUSH);
> + reg_status |= _SBF(0xffff, 16);
> + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
> +
> + memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
> +}
> +
> +static void rk_ahash_reg_init(struct rk_crypto_info *dev)
> +{
> + rk_ahash_hw_init(dev);
> +
> + CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
> + RK_CRYPTO_HRDMA_DONE_ENA);
> +
> + CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
> + RK_CRYPTO_HRDMA_DONE_INT);
> +
> + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
> +RK_CRYPTO_HASH_SWAP_DO);
> +
> + CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
> +   RK_CRYPTO_BYTESWAP_BRFIFO |
> +   RK_CRYPTO_BYTESWAP_BTFIFO);
> +}
> +
> +static int rk_ahash_init(struct ahash_request *req)
> +{
> + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
> + struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
> + struct rk_crypto_info *dev = NULL;
> + int rk_digest_size;
> +
> + dev = tctx->dev;
> + dev->left_bytes = 0;
> + dev->aligned = 0;
> + dev->ahash_req = req;
> + dev->mode = 0;
> + dev->align_size = 4;
> + dev->sg_dst = NULL;
> +
> + tctx->first_op = 1;
> +
> + rk_digest_size = crypto_ahash_digestsize(tfm);
> + if (!rk_digest_size)
> + dev_err(dev->dev, "can't get digestsize\n");
> + if (rk_digest_size == SHA1_DIGEST_SIZE)
> + dev->mode = RK_CRYPTO_HASH_SHA1;
> + else if (rk_digest_size == SHA256_DIGEST_SIZE)
> +  

[PATCH v7 3/4] crypto: akcipher: add akcipher declarations needed by templates.

2015-12-05 Thread Andrew Zaborowski
Add a struct akcipher_instance and struct akcipher_spawn similar to
how AEAD declares them and the macros for converting to/from
crypto_instance/crypto_spawn.  Also add register functions to
avoid exposing crypto_akcipher_type.

Signed-off-by: Andrew Zaborowski 
---
v2: no changes since v1
v3: drop the new crypto_akcipher_type methods and
add struct akcipher_instance
v4: avoid exposing crypto_akcipher_type after all, add struct akcipher_spawn
and utilities
v5: add akcipher_instance.free
v6: only support akcipher_instance.free, not crypto_template.free,
add further akcipher.h macros
v7: remove duplicate crypto_spawn_akcipher added in v6
---
 crypto/akcipher.c  | 34 -
 include/crypto/internal/akcipher.h | 78 ++
 2 files changed, 111 insertions(+), 1 deletion(-)

diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 120ec04..def301e 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "internal.h"
 
 #ifdef CONFIG_NET
@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
 }
 
+static void crypto_akcipher_free_instance(struct crypto_instance *inst)
+{
+   struct akcipher_instance *akcipher = akcipher_instance(inst);
+
+   akcipher->free(akcipher);
+}
+
 static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
+   .free = crypto_akcipher_free_instance,
 #ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
 #endif
@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
.tfmsize = offsetof(struct crypto_akcipher, base),
 };
 
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+u32 type, u32 mask)
+{
+   spawn->base.frontend = &crypto_akcipher_type;
+   return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
+
 struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
  u32 mask)
 {
@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char 
*alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
 
-int crypto_register_akcipher(struct akcipher_alg *alg)
+static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
 struct crypto_alg *base = &alg->base;
 
base->cra_type = _akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+}
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+   struct crypto_alg *base = &alg->base;
+
+   akcipher_prepare_alg(alg);
return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
 
+int akcipher_register_instance(struct crypto_template *tmpl,
+  struct akcipher_instance *inst)
+{
+   akcipher_prepare_alg(&inst->alg);
+   return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(akcipher_register_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic public key cipher type");
diff --git a/include/crypto/internal/akcipher.h 
b/include/crypto/internal/akcipher.h
index 9a2bda1..479a007 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -13,6 +13,22 @@
 #ifndef _CRYPTO_AKCIPHER_INT_H
 #define _CRYPTO_AKCIPHER_INT_H
 #include 
+#include 
+
+struct akcipher_instance {
+   void (*free)(struct akcipher_instance *inst);
+   union {
+   struct {
+   char head[offsetof(struct akcipher_alg, base)];
+   struct crypto_instance base;
+   } s;
+   struct akcipher_alg alg;
+   };
+};
+
+struct crypto_akcipher_spawn {
+   struct crypto_spawn base;
+};
 
 /*
  * Transform internal helpers.
@@ -38,6 +54,56 @@ static inline const char *akcipher_alg_name(struct 
crypto_akcipher *tfm)
return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
 }
 
+static inline struct crypto_instance *akcipher_crypto_instance(
+   struct akcipher_instance *inst)
+{
+   return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct akcipher_instance *akcipher_instance(
+   struct crypto_instance *inst)
+{
+   return container_of(&inst->alg, struct akcipher_instance, alg.base);
+}
+
+static inline struct akcipher_instance *akcipher_alg_instance(
+   struct crypto_akcipher *akcipher)
+{
+   return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base));
+}
+
+static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
+{
+   return crypto_instance_ctx(akcipher_crypto_instance(inst));

Re: [PATCH v6 3/4] crypto: akcipher: add akcipher declarations needed by templates.

2015-12-05 Thread Andrzej Zaborowski
Hi Herbert,

On 4 December 2015 at 15:28, Herbert Xu  wrote:
> Andrew Zaborowski  wrote:
>>
>> +static inline struct crypto_akcipher *crypto_spawn_akcipher(
>> +   struct crypto_akcipher_spawn *spawn)
>> +{
>> +   return crypto_spawn_tfm2(&spawn->base);
>> +}
>> +
>> +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn)
>> +{
>> +   crypto_drop_spawn(&spawn->base);
>> +}
>> +
>> +static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
>> +   struct crypto_akcipher_spawn *spawn)
>> +{
>> +   return container_of(spawn->base.alg, struct akcipher_alg, base);
>> +}
>> +
>> +static inline struct crypto_akcipher *crypto_spawn_akcipher(
>> +   struct crypto_akcipher_spawn *spawn)
>> +{
>> +   return crypto_spawn_tfm2(&spawn->base);
>> +}
>
> You're defining crypto_spawn_akcipher twice.  This also means that
> you didn't even bother compiling it.

The chunk that removed crypto_spawn_akcipher ended up in patch 4/4
because I messed up the rebase, oops.  I'll fix this.

I did compile and test.

Best regards
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v7 4/4] crypto: RSA padding algorithm

2015-12-05 Thread Andrew Zaborowski
This patch adds PKCS#1 v1.5 standard RSA padding as a separate template.
This way an RSA cipher with padding can be obtained by instantiating
"pkcs1pad(rsa)".  The reason for adding this is that RSA is almost
never used without this padding (or OAEP) so it will be needed for
either certificate work in the kernel or the userspace, and I also hear
that it is likely implemented by hardware RSA in which case hardware
implementations of the whole of pkcs1pad(rsa) can be provided.

Signed-off-by: Andrew Zaborowski 
---
v2: rename rsa-padding.c to rsa-pkcs1pad.c,
use a memset instead of a loop,
add a key size check in pkcs1pad_sign,
add a general comment about pkcs1pad_verify
v3: rewrite the initialisation to avoid an obsolete and less flexible
mechanism, now following the aead template initialisation.
v4: follow the aead template initialisation exactly.
v5: use the instance .free, set no template .free.
v6: don't use crypto_alg.cra_init / cra_exit
v7: check for CRYPTO_TFM_REQ_MAY_SLEEP, remove removal of
crypto_spawn_akcipher
---
 crypto/Makefile   |   1 +
 crypto/rsa-pkcs1pad.c | 617 ++
 crypto/rsa.c  |  16 +-
 include/crypto/internal/rsa.h |   2 +
 4 files changed, 635 insertions(+), 1 deletion(-)
 create mode 100644 crypto/rsa-pkcs1pad.c

diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba92..2acdbbd 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
 rsa_generic-y += rsaprivkey-asn1.o
 rsa_generic-y += rsa.o
 rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
 cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
new file mode 100644
index 000..accc67d
--- /dev/null
+++ b/crypto/rsa-pkcs1pad.c
@@ -0,0 +1,617 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015  Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+struct pkcs1pad_ctx {
+   struct crypto_akcipher *child;
+
+   unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+   struct akcipher_request child_req;
+
+   struct scatterlist in_sg[3], out_sg[2];
+   uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+   unsigned int keylen)
+{
+   struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+   int err, size;
+
+   err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+   if (!err) {
+   /* Find out new modulus size from rsa implementation */
+   size = crypto_akcipher_maxsize(ctx->child);
+
+   ctx->key_size = size > 0 ? size : 0;
+   if (size <= 0)
+   err = size;
+   }
+
+   return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+   unsigned int keylen)
+{
+   struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+   int err, size;
+
+   err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+   if (!err) {
+   /* Find out new modulus size from rsa implementation */
+   size = crypto_akcipher_maxsize(ctx->child);
+
+   ctx->key_size = size > 0 ? size : 0;
+   if (size <= 0)
+   err = size;
+   }
+
+   return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+   struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+   /*
+* The maximum destination buffer size for the encrypt/sign operations
+* will be the same as for RSA, even though it's smaller for
+* decrypt/verify.
+*/
+
+   return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+   struct scatterlist *next)
+{
+   int nsegs = next ? 1 : 0;
+
+   if (offset_in_page(buf) + len <= PAGE_SIZE) {
+   nsegs += 1;
+   sg_init_table(sg, nsegs);
+   sg_set_buf(sg, buf, len);
+   } else {
+   nsegs += 2;
+   sg_init_table(sg, nsegs);
+   sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+   sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+   offset_in_page(buf) + len - PAGE_SIZE);
+   }
+
+   if (next)
+   sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int 
err)
+{
+   struct crypto_akcipher *tfm = 

Re: [RFC PATCH] Crypto: rockchip/crypto - add hash support for crypto engine in rk3288

2015-12-05 Thread Stephan Mueller
Am Samstag, 5. Dezember 2015, 14:30:25 schrieb Zain Wang:

Hi Zain,

>Add md5 sha1 sha256 support for crypto engine in rk3288.
>This patch can't support multiple updatings because of limited of IC,
>as result, it can't support import and export too.
>
>Signed-off-by: Zain Wang 
>---
> drivers/crypto/rockchip/Makefile   |   1 +
> drivers/crypto/rockchip/rk3288_crypto.c|  33 +-
> drivers/crypto/rockchip/rk3288_crypto.h|  50 ++-
> drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c |  20 +-
> drivers/crypto/rockchip/rk3288_crypto_ahash.c  | 369
>+ 5 files changed, 455 insertions(+), 18 deletions(-)
> create mode 100644 drivers/crypto/rockchip/rk3288_crypto_ahash.c
>
>diff --git a/drivers/crypto/rockchip/Makefile
>b/drivers/crypto/rockchip/Makefile index 7051c6c..30f9129 100644
>--- a/drivers/crypto/rockchip/Makefile
>+++ b/drivers/crypto/rockchip/Makefile
>@@ -1,3 +1,4 @@
> obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
> rk_crypto-objs := rk3288_crypto.o \
> rk3288_crypto_ablkcipher.o \
>+rk3288_crypto_ahash.o
>diff --git a/drivers/crypto/rockchip/rk3288_crypto.c
>b/drivers/crypto/rockchip/rk3288_crypto.c index 82f3044..67d69d2 100644
>--- a/drivers/crypto/rockchip/rk3288_crypto.c
>+++ b/drivers/crypto/rockchip/rk3288_crypto.c
>@@ -190,7 +190,6 @@ static void rk_crypto_tasklet_cb(unsigned long data)
> {
>   struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
>   struct crypto_async_request *async_req, *backlog;
>-  struct rk_cipher_reqctx *ablk_reqctx;
>   int err = 0;
>   unsigned long flags;
>
>@@ -207,10 +206,10 @@ static void rk_crypto_tasklet_cb(unsigned long data)
>   backlog = NULL;
>   }
>
>-  if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) 
{
>+  if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
>   dev->ablk_req = ablkcipher_request_cast(async_req);
>-  ablk_reqctx   = ablkcipher_request_ctx(dev->ablk_req);
>-  }
>+  else
>+  dev->ahash_req = ahash_request_cast(async_req);
>   err = dev->start(dev);
>   if (err)
>   dev->complete(dev, err);
>@@ -223,6 +222,9 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
>   &rk_cbc_des_alg,
>   &rk_ecb_des3_ede_alg,
>   &rk_cbc_des3_ede_alg,
>+  &rk_ahash_sha1,
>+  &rk_ahash_sha256,
>+  &rk_ahash_md5,
> };
>
> static int rk_crypto_register(struct rk_crypto_info *crypto_info)
>@@ -232,15 +234,24 @@ static int rk_crypto_register(struct rk_crypto_info
>*crypto_info)
>
>   for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
>   rk_cipher_algs[i]->dev = crypto_info;
>-  err = crypto_register_alg(&rk_cipher_algs[i]->alg);
>+  if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
>+  err = crypto_register_alg(
>+  &rk_cipher_algs[i]->alg.crypto);
>+  else
>+  err = crypto_register_ahash(
>+  &rk_cipher_algs[i]->alg.hash);
>   if (err)
>   goto err_cipher_algs;
>   }
>   return 0;
>
> err_cipher_algs:
>-  for (k = 0; k < i; k++)
>-  crypto_unregister_alg(&rk_cipher_algs[k]->alg);
>+  for (k = 0; k < i; k++) {
>+  if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
>+  crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
>+  else
>+  crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
>+  }
>   return err;
> }
>
>@@ -248,8 +259,12 @@ static void rk_crypto_unregister(void)
> {
>   unsigned int i;
>
>-  for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
>-  crypto_unregister_alg(&rk_cipher_algs[i]->alg);
>+  for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
>+  if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
>+  crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
>+  else
>+  crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
>+  }
> }
>
> static void rk_crypto_action(void *data)
>diff --git a/drivers/crypto/rockchip/rk3288_crypto.h
>b/drivers/crypto/rockchip/rk3288_crypto.h index 604ffe7..453a00f 100644
>--- a/drivers/crypto/rockchip/rk3288_crypto.h
>+++ b/drivers/crypto/rockchip/rk3288_crypto.h
>@@ -6,6 +6,10 @@
> #include 
> #include 
> #include 
>+#include 
>+
>+#include "crypto/md5.h"
>+#include "crypto/sha.h"
>
> #define _SBF(v, f)((v) << (f))
>
>@@ -149,6 +153,28 @@
> #define RK_CRYPTO_TDES_KEY3_0 0x0130
> #define RK_CRYPTO_TDES_KEY3_1 0x0134
>
>+/* HASH */
>+#define RK_CRYPTO_HASH_CTRL   0x0180
>+#define RK_CRYPTO_HASH_SWAP_DOBIT(3)
>+#define RK_CRYPTO_HASH_SWAP_DIBIT(2)
>+#define RK_CRYPTO_HASH_SHA1   _SBF(0x00, 0)
>+#define 

[PATCH 2/2] chacha20poly1305: Skip encryption/decryption for 0-len

2015-12-05 Thread Jason A. Donenfeld
If the length of the plaintext is zero, there's no need to waste cycles
on encryption and decryption. Using the chacha20poly1305 construction
for zero-length plaintexts is a common way of using a shared encryption
key for AAD authentication.

Signed-off-by: Jason A. Donenfeld 
Cc: 
---
 crypto/chacha20poly1305.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 99c3cce..7b6b935 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
 
+   if (rctx->cryptlen == 0)
+   goto skip;
+
chacha_iv(creq->iv, req, 1);
 
sg_init_table(rctx->src, 2);
@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
if (err)
return err;
 
+skip:
return poly_verify_tag(req);
 }
 
@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
 
+   if (req->cryptlen == 0)
+   goto skip;
+
chacha_iv(creq->iv, req, 1);
 
sg_init_table(rctx->src, 2);
@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
if (err)
return err;
 
+skip:
return poly_genkey(req);
 }
 
-- 
2.6.3

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/2] blkcipher: Copy iv from desc even for 0-len walks

2015-12-05 Thread Jason A. Donenfeld
Some ciphers actually support encrypting zero length plaintexts. For
example, many AEAD modes support this. The resulting ciphertext for
those winds up being only the authentication tag, which is a result of
the key, the iv, the additional data, and the fact that the plaintext
had zero length. The blkcipher constructors won't copy the IV to the
right place, however, when using a zero length input, resulting in
some significant problems when ciphers call their initialization
routines, only to find that the ->iv parameter is uninitialized. One
such example of this would be using chacha20poly1305 with a zero length
input, which then calls chacha20, which calls the key setup routine,
which eventually OOPSes due to the uninitialized ->iv member.

Signed-off-by: Jason A. Donenfeld 
Cc: 
---
 crypto/ablkcipher.c | 2 +-
 crypto/blkcipher.c  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b4ffc5b..e5b5721 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct 
ablkcipher_request *req,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
 
+   walk->iv = req->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
 
walk->iv_buffer = NULL;
-   walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
 
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b9814..8cc1622 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc 
*desc,
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
 
+   walk->iv = desc->info;
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
 
walk->buffer = NULL;
-   walk->iv = desc->info;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = blkcipher_copy_iv(walk);
if (err)
-- 
2.6.3

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html