Re: [PATCH] vti6: Add pmtu handling to vti6_xmit.

2016-02-24 Thread Mark McKinstry
On 19/02/16 01:19, Steffen Klassert wrote:
> On Thu, Feb 18, 2016 at 01:40:00AM +, Mark McKinstry wrote:
>> This patch fixes our issue, thanks. In our scenario the tunnel path MTU
>> now gets updated so that subsequent large packets sent over the tunnel
>> get fragmented correctly.
> I've applied this patch to the ipsec tree now.
> Thanks for testing!
I spoke too soon. Upon further testing with this patch we have found
that it causes a socket buffer (skb) leak. This is problematic for us
and can cause memory exhaustion in one of our test scenarios that has
an IPv4 IPsec tunnel over a PPP link. Also, the patch's -EMSGSIZE
return value appears to be invalid because vti_xmit() should be
returning a netdev_tx_t (NETDEV_TX_OK etc). It looks to me that this
patch should really be doing a goto tx_error rather than an early
return with -EMSGSIZE. This would result in the skb being freed,
NETDEV_TX_OK being returned (thus indicating vti_xmit() "took care of
the packet"), and the tx_errors counter being incremented (which seems
like a reasonable thing to do).

I think the original IPv6 patch probably has the same issues, and could
be introducing a DoS vulnerability in recent Linux releases. If this
patch's code gets hit for every received packet then the box's memory
will soon be exhausted - e.g. a rogue device sends a stream of largish
packets through a box with a vti interface, and ignores every
ICMPV6_PKT_TOOBIG packet sent back to it.
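
To illustrate, here is a rough sketch of the error path I have in mind
(untested, and only an approximation of the vti code rather than an
actual patch):

        if (skb->len > mtu) {
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
                /* instead of "return -EMSGSIZE;" take the error path */
                goto tx_error;
        }
        ...
tx_error:
        dev->stats.tx_errors++;
        kfree_skb(skb);
        return NETDEV_TX_OK;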


[PATCH v3 5/8] crypto: acomp - add support for lz4hc via scomp

2016-02-24 Thread Giovanni Cabiddu
This patch implements an scomp backend for the lz4hc compression algorithm.
This way, lz4hc is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig |1 +
 crypto/lz4hc.c |   94 ++--
 2 files changed, 85 insertions(+), 10 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8374bee..0d43757 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1536,6 +1536,7 @@ config CRYPTO_LZ4
 config CRYPTO_LZ4HC
tristate "LZ4HC compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select LZ4HC_COMPRESS
select LZ4_DECOMPRESS
help
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index a1d3b5b..b798b0c 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -22,37 +22,53 @@
 #include 
 #include 
 #include 
+#include <crypto/internal/scompress.h>
 
 struct lz4hc_ctx {
void *lz4hc_comp_mem;
 };
 
+static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+{
+   void *ctx;
+
+   ctx = vmalloc(LZ4HC_MEM_COMPRESS);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   return ctx;
+}
+
 static int lz4hc_init(struct crypto_tfm *tfm)
 {
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
 
-   ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS);
-   if (!ctx->lz4hc_comp_mem)
+   ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
+   if (IS_ERR(ctx->lz4hc_comp_mem))
return -ENOMEM;
 
return 0;
 }
 
+static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   vfree(ctx);
+}
+
 static void lz4hc_exit(struct crypto_tfm *tfm)
 {
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
 
-   vfree(ctx->lz4hc_comp_mem);
+   lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
 }
 
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
-   unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
+  u8 *dst, unsigned int *dlen, void *ctx)
 {
-   struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
 
-   err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem);
+   err = lz4hc_compress(src, slen, dst, &tmp_len, ctx);
 
if (err < 0)
return -EINVAL;
@@ -61,8 +77,25 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
return 0;
 }
 
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen,
+  void *ctx)
+{
+   return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+unsigned int slen, u8 *dst,
+unsigned int *dlen)
+{
+   struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   return __lz4hc_compress_crypto(src, slen, dst, dlen,
+   ctx->lz4hc_comp_mem);
+}
+
+static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
+u8 *dst, unsigned int *dlen, void *ctx)
 {
int err;
size_t tmp_len = *dlen;
@@ -76,6 +109,20 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
return err;
 }
 
+static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+unsigned int slen, u8 *dst, unsigned int *dlen,
+void *ctx)
+{
+   return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+  unsigned int slen, u8 *dst,
+  unsigned int *dlen)
+{
+   return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
 static struct crypto_alg alg_lz4hc = {
.cra_name   = "lz4hc",
.cra_flags  = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +136,41 @@ static struct crypto_alg alg_lz4hc = {
.coa_decompress = lz4hc_decompress_crypto } }
 };
 
+static struct scomp_alg scomp = {
+   .alloc_ctx  = lz4hc_alloc_ctx,
+   .free_ctx   = lz4hc_free_ctx,
+   .compress   = lz4hc_scompress,
+   .decompress = lz4hc_sdecompress,
+   .base   = {
+   .cra_name   = "lz4hc",
+   .cra_driver_name = "lz4hc-scomp",
+   .cra_flags   = CRYPTO_ALG_TYPE_SCOMPRESS |
+   CRYPTO_SCOMP_DECOMP_NOCTX,
+   .cra_module  = THIS_MODULE,
+   }
+};
+
 static int __init 

[PATCH v3 2/8] crypto: add driver-side scomp interface

2016-02-24 Thread Giovanni Cabiddu
Add a synchronous back-end (scomp) to acomp. This allows the compression
algorithms already present in the LKCF to be easily exposed via acomp.
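
For reference, each backend registers a struct scomp_alg with four
callbacks, roughly of this shape ("foo" is a placeholder; the real
conversions are in the lzo/lz4/lz4hc/842/deflate patches later in
this series):

        static struct scomp_alg scomp = {
                .alloc_ctx  = foo_alloc_ctx,
                .free_ctx   = foo_free_ctx,
                .compress   = foo_scompress,
                .decompress = foo_sdecompress,
                .base       = {
                        .cra_name        = "foo",
                        .cra_driver_name = "foo-scomp",
                        .cra_flags       = CRYPTO_ALG_TYPE_SCOMPRESS,
                        .cra_module      = THIS_MODULE,
                }
        };

        ret = crypto_register_scomp(&scomp);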

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Makefile |1 +
 crypto/acompress.c  |   75 ++-
 crypto/scompress.c  |  262 +++
 include/crypto/acompress.h  |   65 +++--
 include/crypto/internal/acompress.h |   42 ++
 include/crypto/internal/scompress.h |  138 ++
 include/linux/crypto.h  |9 +-
 7 files changed, 543 insertions(+), 49 deletions(-)
 create mode 100644 crypto/scompress.c
 create mode 100644 include/crypto/internal/scompress.h

diff --git a/crypto/Makefile b/crypto/Makefile
index e817b38..fc8fcfe 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
 obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
diff --git a/crypto/acompress.c b/crypto/acompress.c
index f24fef3..2bd9c95 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -22,8 +22,11 @@
 #include 
 #include 
 #include 
+#include <crypto/internal/scompress.h>
 #include "internal.h"
 
+static const struct crypto_type crypto_acomp_type;
+
 #ifdef CONFIG_NET
 static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
@@ -67,6 +70,14 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
 
+   if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+   return crypto_init_scomp_ops_async(tfm);
+
+   acomp->compress = alg->compress;
+   acomp->decompress = alg->decompress;
+   acomp->comp_reqsize = alg->comp_reqsize;
+   acomp->decomp_reqsize = alg->decomp_reqsize;
+
if (alg->exit)
acomp->base.exit = crypto_acomp_exit_tfm;
 
@@ -76,15 +87,22 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
return 0;
 }
 
+unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+   if (alg->cra_type == &crypto_acomp_type)
+   return crypto_alg_extsize(alg);
+   return sizeof(struct crypto_scomp *);
+}
+
 static const struct crypto_type crypto_acomp_type = {
-   .extsize = crypto_alg_extsize,
+   .extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
 #ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
 #endif
.report = crypto_acomp_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
-   .maskset = CRYPTO_ALG_TYPE_MASK,
+   .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
 };
@@ -96,6 +114,59 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
 
+struct acomp_req *acomp_compression_request_alloc(struct crypto_acomp *acomp,
+ gfp_t gfp)
+{
+   struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+   struct acomp_req *req;
+
+   req = __acomp_compression_request_alloc(acomp, gfp);
+   if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+   return crypto_acomp_scomp_alloc_ctx(req, 1);
+
+   return req;
+}
+EXPORT_SYMBOL_GPL(acomp_compression_request_alloc);
+
+struct acomp_req *acomp_decompression_request_alloc(struct crypto_acomp *acomp,
+   gfp_t gfp)
+{
+   struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+   struct acomp_req *req;
+
+   req = __acomp_decompression_request_alloc(acomp, gfp);
+   if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+   return crypto_acomp_scomp_alloc_ctx(req, 0);
+
+   return req;
+}
+EXPORT_SYMBOL_GPL(acomp_decompression_request_alloc);
+
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp, gfp_t gfp)
+{
+   struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+   struct acomp_req *req;
+
+   req = __acomp_request_alloc(acomp, gfp);
+   if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+   return crypto_acomp_scomp_alloc_ctx(req, 1);
+
+   return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+   struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+   struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+   if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+   crypto_acomp_scomp_free_ctx(req);
+
+   __acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
 int crypto_register_acomp(struct acomp_alg *alg)
 {
struct crypto_alg *base 

[PATCH v3 8/8] crypto: acomp - update testmgr with support for acomp

2016-02-24 Thread Giovanni Cabiddu
This patch adds tests to the test manager for algorithms exposed through
the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/testmgr.c |  159 +-
 1 files changed, 146 insertions(+), 13 deletions(-)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6e41a93..dd6b198 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include <crypto/acompress.h>
 
 #include "internal.h"
 
@@ -1419,6 +1420,121 @@ out:
return ret;
 }
 
+static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
+struct comp_testvec *dtemplate, int ctcount, int dtcount)
+{
+   const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+   unsigned int i;
+   char output[COMP_BUF_SIZE];
+   int ret;
+   struct scatterlist src, dst;
+   struct acomp_req *req;
+   struct tcrypt_result result;
+
+   for (i = 0; i < ctcount; i++) {
+   unsigned int dlen = COMP_BUF_SIZE;
+   int ilen = ctemplate[i].inlen;
+
+   memset(output, 0, sizeof(output));
+   init_completion(&result.completion);
+   sg_init_one(&src, ctemplate[i].input, ilen);
+   sg_init_one(&dst, output, dlen);
+
+   req = acomp_compression_request_alloc(tfm, GFP_KERNEL);
+   if (!req) {
+   pr_err("alg: acomp: request alloc failed for %s\n",
+  algo);
+   ret = -ENOMEM;
+   goto out;
+   }
+
+   acomp_request_set_params(req, &src, &dst, ilen, dlen);
+   acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+  tcrypt_complete, &result);
+
+   ret = wait_async_op(&result, crypto_acomp_compress(req));
+   if (ret) {
+   pr_err("alg: acomp: compression failed on test %d for 
%s: ret=%d\n",
+  i + 1, algo, -ret);
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (req->produced != ctemplate[i].outlen) {
+   pr_err("alg: acomp: Compression test %d failed for %s: 
output len = %d\n",
+  i + 1, algo, req->produced);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (memcmp(output, ctemplate[i].output, req->produced)) {
+   pr_err("alg: acomp: Compression test %d failed for 
%s\n",
+  i + 1, algo);
+   hexdump(output, req->produced);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   acomp_request_free(req);
+   }
+
+   for (i = 0; i < dtcount; i++) {
+   unsigned int dlen = COMP_BUF_SIZE;
+   int ilen = dtemplate[i].inlen;
+
+   memset(output, 0, sizeof(output));
+   init_completion(&result.completion);
+   sg_init_one(&src, dtemplate[i].input, ilen);
+   sg_init_one(&dst, output, dlen);
+
+   req = acomp_decompression_request_alloc(tfm, GFP_KERNEL);
+   if (!req) {
+   pr_err("alg: acomp: request alloc failed for %s\n",
+  algo);
+   ret = -ENOMEM;
+   goto out;
+   }
+
+   acomp_request_set_params(req, &src, &dst, ilen, dlen);
+   acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+  tcrypt_complete, &result);
+
+   ret = wait_async_op(&result, crypto_acomp_decompress(req));
+   if (ret) {
+   pr_err("alg: acomp: decompression failed on test %d for 
%s: ret=%d\n",
+  i + 1, algo, -ret);
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (req->produced != dtemplate[i].outlen) {
+   pr_err("alg: acomp: Decompression test %d failed for 
%s: output len = %d\n",
+  i + 1, algo, req->produced);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   if (memcmp(output, dtemplate[i].output, req->produced)) {
+   pr_err("alg: acomp: Decompression test %d failed for 
%s\n",
+  i + 1, algo);
+   hexdump(output, req->produced);
+   ret = -EINVAL;
+   acomp_request_free(req);
+   goto out;
+   }
+
+   acomp_request_free(req);
+   }
+
+   ret 

[PATCH v3 4/8] crypto: acomp - add support for lz4 via scomp

2016-02-24 Thread Giovanni Cabiddu
This patch implements an scomp backend for the lz4 compression algorithm.
This way, lz4 is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig |1 +
 crypto/lz4.c   |   93 ++--
 2 files changed, 84 insertions(+), 10 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index c15070d8..8374bee 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1527,6 +1527,7 @@ config CRYPTO_842
 config CRYPTO_LZ4
tristate "LZ4 compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select LZ4_COMPRESS
select LZ4_DECOMPRESS
help
diff --git a/crypto/lz4.c b/crypto/lz4.c
index aefbcea..a9fc214 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -23,36 +23,53 @@
 #include 
 #include 
 #include 
+#include <crypto/internal/scompress.h>
 
 struct lz4_ctx {
void *lz4_comp_mem;
 };
 
+static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+{
+   void *ctx;
+
+   ctx = vmalloc(LZ4_MEM_COMPRESS);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   return ctx;
+}
+
 static int lz4_init(struct crypto_tfm *tfm)
 {
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-   ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS);
-   if (!ctx->lz4_comp_mem)
+   ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
+   if (IS_ERR(ctx->lz4_comp_mem))
return -ENOMEM;
 
return 0;
 }
 
+static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   vfree(ctx);
+}
+
 static void lz4_exit(struct crypto_tfm *tfm)
 {
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-   vfree(ctx->lz4_comp_mem);
+
+   lz4_free_ctx(NULL, ctx->lz4_comp_mem);
 }
 
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
-   unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
+u8 *dst, unsigned int *dlen, void *ctx)
 {
-   struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
 
-   err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem);
+   err = lz4_compress(src, slen, dst, &tmp_len, ctx);
 
if (err < 0)
return -EINVAL;
@@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
return 0;
 }
 
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
+unsigned int slen, u8 *dst, unsigned int *dlen,
+void *ctx)
+{
+   return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+   struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
+}
+
+static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
+  u8 *dst, unsigned int *dlen, void *ctx)
 {
int err;
size_t tmp_len = *dlen;
@@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
return err;
 }
 
+static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen,
+  void *ctx)
+{
+   return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+unsigned int slen, u8 *dst,
+unsigned int *dlen)
+{
+   return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
 static struct crypto_alg alg_lz4 = {
.cra_name   = "lz4",
.cra_flags  = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +135,41 @@ static struct crypto_alg alg_lz4 = {
.coa_decompress = lz4_decompress_crypto } }
 };
 
+static struct scomp_alg scomp = {
+   .alloc_ctx  = lz4_alloc_ctx,
+   .free_ctx   = lz4_free_ctx,
+   .compress   = lz4_scompress,
+   .decompress = lz4_sdecompress,
+   .base   = {
+   .cra_name   = "lz4",
+   .cra_driver_name = "lz4-scomp",
+   .cra_flags   = CRYPTO_ALG_TYPE_SCOMPRESS |
+   CRYPTO_SCOMP_DECOMP_NOCTX,
+   .cra_module  = THIS_MODULE,
+   }
+};
+
 static int __init lz4_mod_init(void)
 {
-   return crypto_register_alg(&alg_lz4);
+   int ret;
+
+   ret = crypto_register_alg(&alg_lz4);
+   if (ret)
+   return ret;
+
+   ret = crypto_register_scomp(&scomp);
+   

[PATCH v3 0/8] crypto: asynchronous compression api

2016-02-24 Thread Giovanni Cabiddu
The following patch set introduces acomp, a generic asynchronous
(de)compression api.
What is proposed is a new crypto type called crypto_acomp_type,
plus a new struct acomp_alg and struct crypto_acomp, together
with a number of helper functions to register acomp type algorithms
and allocate tfm instances. This is to make it similar to how the
existing crypto API works for the ablkcipher, ahash, and aead types.
This interface will allow the following operations:

int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);

The main benefit of this API is that it allows for asynchronous
(de)compression operations, as typically provided by hardware
compression accelerators.
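
For illustration, intended usage from a caller's point of view looks
roughly like this (a sketch based on the helpers added in this series;
the buffers, completion callback and result variable are placeholders,
and error handling is omitted):

        struct crypto_acomp *tfm;
        struct acomp_req *req;
        struct scatterlist src, dst;
        int ret;

        tfm = crypto_alloc_acomp("lz4", 0, 0);
        req = acomp_request_alloc(tfm, GFP_KERNEL);

        sg_init_one(&src, in_buf, in_len);
        sg_init_one(&dst, out_buf, out_len);
        acomp_request_set_params(req, &src, &dst, in_len, out_len);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   my_complete, &my_result);

        ret = crypto_acomp_compress(req);  /* may complete asynchronously */

        acomp_request_free(req);
        crypto_free_acomp(tfm);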

Changes in v3:
- added driver-side scomp interface
- provided support for lzo, lz4, lz4hc, 842, deflate compression algorithms
  via the acomp api (through scomp)
- extended testmgr to support acomp
- removed extended acomp api for supporting deflate algorithm parameters
  (will be enhanced and re-proposed in future)
Note that (2) to (7) are a rework of Joonsoo Kim's scomp patches.

Changes in v2:
- added compression and decompression request sizes in acomp_alg
  in order to enable noctx support
- extended api with helpers to allocate compression and
  decompression requests

Changes from initial submit:
- added consumed and produced fields to acomp_req
- extended api to support configuration of deflate compressors

---
Giovanni Cabiddu (8):
  crypto: add asynchronous compression api
  crypto: add driver-side scomp interface
  crypto: acomp - add support for lzo via scomp
  crypto: acomp - add support for lz4 via scomp
  crypto: acomp - add support for lz4hc via scomp
  crypto: acomp - add support for 842 via scomp
  crypto: acomp - add support for deflate via scomp
  crypto: acomp - update testmgr with support for acomp

 crypto/842.c|   84 ++-
 crypto/Kconfig  |   15 ++
 crypto/Makefile |3 +
 crypto/acompress.c  |  189 +++
 crypto/crypto_user.c|   21 +++
 crypto/deflate.c|  112 --
 crypto/lz4.c|   93 ++--
 crypto/lz4hc.c  |   94 ++--
 crypto/lzo.c|  100 ++--
 crypto/scompress.c  |  262 +++
 crypto/testmgr.c|  159 ++--
 include/crypto/acompress.h  |  291 +++
 include/crypto/internal/acompress.h |  108 +
 include/crypto/internal/scompress.h |  138 +
 include/linux/crypto.h  |8 +
 15 files changed, 1616 insertions(+), 61 deletions(-)
 create mode 100644 crypto/acompress.c
 create mode 100644 crypto/scompress.c
 create mode 100644 include/crypto/acompress.h
 create mode 100644 include/crypto/internal/acompress.h
 create mode 100644 include/crypto/internal/scompress.h

-- 
1.7.4.1



[PATCH v3 1/8] crypto: add asynchronous compression api

2016-02-24 Thread Giovanni Cabiddu
This patch introduces acomp, an asynchronous compression api that uses
scatterlist buffers.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig  |   10 +
 crypto/Makefile |2 +
 crypto/acompress.c  |  118 +
 crypto/crypto_user.c|   21 +++
 include/crypto/acompress.h  |  318 +++
 include/crypto/internal/acompress.h |   66 +++
 include/linux/crypto.h  |1 +
 7 files changed, 536 insertions(+), 0 deletions(-)
 create mode 100644 crypto/acompress.c
 create mode 100644 include/crypto/acompress.h
 create mode 100644 include/crypto/internal/acompress.h

diff --git a/crypto/Kconfig b/crypto/Kconfig
index f6bfdda..29ac8cb 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER
select CRYPTO_AKCIPHER2
select CRYPTO_ALGAPI
 
+config CRYPTO_ACOMP
+   tristate
+   select CRYPTO_ACOMP2
+   select CRYPTO_ALGAPI
+
+config CRYPTO_ACOMP2
+   tristate
+   select CRYPTO_ALGAPI2
+
 config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@@ -114,6 +123,7 @@ config CRYPTO_MANAGER2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
+   select CRYPTO_ACOMP2
 
 config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
diff --git a/crypto/Makefile b/crypto/Makefile
index 4f4ef7e..e817b38 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
+obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
+
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
 clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 000..f24fef3
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,118 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li 
+ *  Giovanni Cabiddu 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "internal.h"
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+   struct crypto_report_comp racomp;
+
+   strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+   if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+   sizeof(struct crypto_report_comp), &racomp))
+   goto nla_put_failure;
+   return 0;
+
+nla_put_failure:
+   return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+   return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+   __attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+   seq_puts(m, "type : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+   struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+   struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+   alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+   struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+   struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+   if (alg->exit)
+   acomp->base.exit = crypto_acomp_exit_tfm;
+
+   if (alg->init)
+   return alg->init(acomp);
+
+   return 0;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+   .extsize = crypto_alg_extsize,
+   .init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+   .show = crypto_acomp_show,
+#endif
+   .report = crypto_acomp_report,
+   .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+   .maskset = CRYPTO_ALG_TYPE_MASK,
+   .type = CRYPTO_ALG_TYPE_ACOMPRESS,
+   .tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+   u32 mask)
+{
+   return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+   struct crypto_alg *base = &alg->base;
+
+   base->cra_type = _acomp_type;
+   base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+   base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+   return 

[PATCH v3 3/8] crypto: acomp - add support for lzo via scomp

2016-02-24 Thread Giovanni Cabiddu
This patch implements an scomp backend for the lzo compression algorithm.
This way, lzo is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/Kconfig |1 +
 crypto/lzo.c   |  100 +++
 2 files changed, 86 insertions(+), 15 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 29ac8cb..c15070d8 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1510,6 +1510,7 @@ config CRYPTO_DEFLATE
 config CRYPTO_LZO
tristate "LZO compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select LZO_COMPRESS
select LZO_DECOMPRESS
help
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 4b3e925..01403c7 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -22,40 +22,56 @@
 #include 
 #include 
 #include 
+#include <crypto/internal/scompress.h>
 
 struct lzo_ctx {
void *lzo_comp_mem;
 };
 
+static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+{
+   void *ctx;
+
+   ctx = kmalloc(LZO1X_MEM_COMPRESS,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+   if (!ctx)
+   ctx = vmalloc(LZO1X_MEM_COMPRESS);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   return ctx;
+}
+
 static int lzo_init(struct crypto_tfm *tfm)
 {
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-   ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
-   GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-   if (!ctx->lzo_comp_mem)
-   ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
-   if (!ctx->lzo_comp_mem)
+   ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
+   if (IS_ERR(ctx->lzo_comp_mem))
return -ENOMEM;
 
return 0;
 }
 
+static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   kvfree(ctx);
+}
+
 static void lzo_exit(struct crypto_tfm *tfm)
 {
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-   kvfree(ctx->lzo_comp_mem);
+   lzo_free_ctx(NULL, ctx->lzo_comp_mem);
 }
 
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
-   unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lzo_compress(const u8 *src, unsigned int slen,
+   u8 *dst, unsigned int *dlen, void *ctx)
 {
-   struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
 
-   err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
+   err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
 
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,8 +80,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
return 0;
 }
 
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
+   unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+   struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
+}
+
+static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
+unsigned int slen, u8 *dst, unsigned int *dlen,
+void *ctx)
+{
+   return __lzo_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __lzo_decompress(const u8 *src, unsigned int slen,
+   u8 *dst, unsigned int *dlen)
 {
int err;
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
@@ -77,7 +108,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
 
*dlen = tmp_len;
return 0;
+}
 
+static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+   return __lzo_decompress(src, slen, dst, dlen);
+}
+
+static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+  unsigned int slen, u8 *dst, unsigned int *dlen,
+  void *ctx)
+{
+   return __lzo_decompress(src, slen, dst, dlen);
 }
 
 static struct crypto_alg alg = {
@@ -88,18 +131,45 @@ static struct crypto_alg alg = {
.cra_init   = lzo_init,
.cra_exit   = lzo_exit,
.cra_u  = { .compress = {
-   .coa_compress   = lzo_compress,
-   .coa_decompress = lzo_decompress } }
+   .coa_compress   = lzo_compress,
+   .coa_decompress = lzo_decompress } }
+};
+
+static struct scomp_alg scomp = {
+   .alloc_ctx  = lzo_alloc_ctx,
+   .free_ctx   = lzo_free_ctx,
+   .compress   = lzo_scompress,
+   .decompress = lzo_sdecompress,
+   .base   = {
+   .cra_name   = "lzo",
+   .cra_driver_name = 

[PATCH v3 6/8] crypto: acomp - add support for 842 via scomp

2016-02-24 Thread Giovanni Cabiddu
This patch implements an scomp backend for the 842 compression algorithm.
This way, 842 is exposed through the acomp api.

Signed-off-by: Giovanni Cabiddu 
---
 crypto/842.c   |   84 ++--
 crypto/Kconfig |1 +
 2 files changed, 82 insertions(+), 3 deletions(-)

diff --git a/crypto/842.c b/crypto/842.c
index 98e387e..f61e77c 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -31,11 +31,46 @@
 #include 
 #include 
 #include 
+#include <crypto/internal/scompress.h>
 
 struct crypto842_ctx {
-   char wmem[SW842_MEM_COMPRESS];  /* working memory for compress */
+   void *wmem; /* working memory for compress */
 };
 
+static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+{
+   void *ctx;
+
+   ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+
+   return ctx;
+}
+
+static int crypto842_init(struct crypto_tfm *tfm)
+{
+   struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   ctx->wmem = crypto842_alloc_ctx(NULL);
+   if (IS_ERR(ctx->wmem))
+   return -ENOMEM;
+
+   return 0;
+}
+
+static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+   kfree(ctx);
+}
+
+static void crypto842_exit(struct crypto_tfm *tfm)
+{
+   struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+   crypto842_free_ctx(NULL, ctx->wmem);
+}
+
 static int crypto842_compress(struct crypto_tfm *tfm,
  const u8 *src, unsigned int slen,
  u8 *dst, unsigned int *dlen)
@@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm,
return sw842_compress(src, slen, dst, dlen, ctx->wmem);
 }
 
+static int crypto842_scompress(struct crypto_scomp *tfm,
+  const u8 *src, unsigned int slen,
+  u8 *dst, unsigned int *dlen, void *ctx)
+{
+   return sw842_compress(src, slen, dst, dlen, ctx);
+}
+
 static int crypto842_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
@@ -52,27 +94,63 @@ static int crypto842_decompress(struct crypto_tfm *tfm,
return sw842_decompress(src, slen, dst, dlen);
 }
 
+static int crypto842_sdecompress(struct crypto_scomp *tfm,
+const u8 *src, unsigned int slen,
+u8 *dst, unsigned int *dlen, void *ctx)
+{
+   return sw842_decompress(src, slen, dst, dlen);
+}
+
 static struct crypto_alg alg = {
.cra_name   = "842",
.cra_driver_name= "842-generic",
.cra_priority   = 100,
.cra_flags  = CRYPTO_ALG_TYPE_COMPRESS,
-   .cra_ctxsize= sizeof(struct crypto842_ctx),
.cra_module = THIS_MODULE,
+   .cra_init   = crypto842_init,
+   .cra_exit   = crypto842_exit,
.cra_u  = { .compress = {
.coa_compress   = crypto842_compress,
.coa_decompress = crypto842_decompress } }
 };
 
+static struct scomp_alg scomp = {
+   .alloc_ctx  = crypto842_alloc_ctx,
+   .free_ctx   = crypto842_free_ctx,
+   .compress   = crypto842_scompress,
+   .decompress = crypto842_sdecompress,
+   .base   = {
+   .cra_name   = "842",
+   .cra_driver_name = "842-scomp",
+   .cra_priority= 100,
+   .cra_flags   = CRYPTO_ALG_TYPE_SCOMPRESS |
+   CRYPTO_SCOMP_DECOMP_NOCTX,
+   .cra_module  = THIS_MODULE,
+   }
+};
+
 static int __init crypto842_mod_init(void)
 {
-   return crypto_register_alg(&alg);
+   int ret;
+
+   ret = crypto_register_alg(&alg);
+   if (ret)
+   return ret;
+
+   ret = crypto_register_scomp(&scomp);
+   if (ret) {
+   crypto_unregister_alg(&alg);
+   return ret;
+   }
+
+   return ret;
 }
 module_init(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
crypto_unregister_alg(&alg);
+   crypto_unregister_scomp(&scomp);
 }
 module_exit(crypto842_mod_exit);
 
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 0d43757..690e474 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1519,6 +1519,7 @@ config CRYPTO_LZO
 config CRYPTO_842
tristate "842 compression algorithm"
select CRYPTO_ALGAPI
+   select CRYPTO_ACOMP2
select 842_COMPRESS
select 842_DECOMPRESS
help
-- 
1.7.4.1



Re: [PATCH 0/2] KEYS: Use pkcs1pad for padding in software_pkey

2016-02-24 Thread David Howells
Tadeusz Struk  wrote:

> I have converted the software_pkey to make use of the pkcs1pad
> template. The rsa.c is reverted back to what it was, i.e. just
> math primitives, and all padding is done in rsa-pkcs1pad.c.
> software_pkey.c just allocates pkcs1pad(alg,hash)

Okay, thanks - I'll take a look at that later.

One thought that just occurred to me: would it make sense to make a second
crypto template in rsa-pkcs1pad.c and call it "rsassa-pkcs1-v1_5" that does
this padding variant?  It can share most of its code with the generic pkcs1pad
algorithm.

We could then also add a third variant "rsassa-pss" at a later date.

David


[PATCH 1/2] crypto: Add hash param to pkcs1pad

2016-02-24 Thread Tadeusz Struk
This adds a hash param to pkcs1pad.
The pkcs1pad template can work with or without the hash.
When the hash param is provided, the verify operation will
also check the output against the known digest.
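
For example (a hypothetical snippet; the actual use by software_pkey
is in patch 2/2):

        /* without the hash param - padding behaviour as before */
        tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);

        /* with the hash param - verify also checks the digest info */
        tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);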

Signed-off-by: Tadeusz Struk 
---
 crypto/rsa-pkcs1pad.c |  182 ++---
 1 file changed, 156 insertions(+), 26 deletions(-)

diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 50f5c97..1cea67d 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -18,12 +18,89 @@
 #include 
 #include 
 
+/*
+ * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
+ */
+static const u8 rsa_digest_info_md5[] = {
+   0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
+   0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
+   0x05, 0x00, 0x04, 0x10
+};
+
+static const u8 rsa_digest_info_sha1[] = {
+   0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+   0x2b, 0x0e, 0x03, 0x02, 0x1a,
+   0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 rsa_digest_info_rmd160[] = {
+   0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+   0x2b, 0x24, 0x03, 0x02, 0x01,
+   0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 rsa_digest_info_sha224[] = {
+   0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
+   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
+   0x05, 0x00, 0x04, 0x1c
+};
+
+static const u8 rsa_digest_info_sha256[] = {
+   0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
+   0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 rsa_digest_info_sha384[] = {
+   0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
+   0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 rsa_digest_info_sha512[] = {
+   0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
+   0x05, 0x00, 0x04, 0x40
+};
+
+static const struct rsa_asn1_template {
+   const char  *name;
+   const u8*data;
+   size_t  size;
+} rsa_asn1_templates[] = {
+#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
+   _(md5),
+   _(sha1),
+   _(rmd160),
+   _(sha256),
+   _(sha384),
+   _(sha512),
+   _(sha224),
+   { NULL }
+#undef _
+};
+
+static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
+{
+   const struct rsa_asn1_template *p;
+
+   for (p = rsa_asn1_templates; p->name; p++)
+   if (strcmp(name, p->name) == 0)
+   return p;
+   return NULL;
+}
+
 struct pkcs1pad_ctx {
struct crypto_akcipher *child;
-
+   const char *hash_name;
unsigned int key_size;
 };
 
+struct pkcs1pad_inst_ctx {
+   struct crypto_akcipher_spawn spawn;
+   const char *hash_name;
+};
+
 struct pkcs1pad_request {
struct akcipher_request child_req;
 
@@ -339,13 +416,22 @@ static int pkcs1pad_sign(struct akcipher_request *req)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+   const struct rsa_asn1_template *digest_info = NULL;
int err;
-   unsigned int ps_end;
+   unsigned int ps_end, digest_size = 0;
 
if (!ctx->key_size)
return -EINVAL;
 
-   if (req->src_len > ctx->key_size - 11)
+   if (ctx->hash_name) {
+   digest_info = rsa_lookup_asn1(ctx->hash_name);
+   if (!digest_info)
+   return -EINVAL;
+
+   digest_size = digest_info->size;
+   }
+
+   if (req->src_len + digest_size > ctx->key_size - 11)
return -EOVERFLOW;
 
if (req->dst_len < ctx->key_size) {
@@ -371,11 +457,16 @@ static int pkcs1pad_sign(struct akcipher_request *req)
if (!req_ctx->in_buf)
return -ENOMEM;
 
-   ps_end = ctx->key_size - req->src_len - 2;
+   ps_end = ctx->key_size - digest_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x01;
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
 
+   if (digest_info) {
+   memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
+  digest_info->size);
+   }
+
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
 
@@ -408,6 +499,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+   const struct rsa_asn1_template *digest_info;
unsigned int pos;
 
if (err == -EOVERFLOW)
@@ -422,20 +514,33 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)

[PATCH 2/2] crypto: remove padding logic from rsa.c

2016-02-24 Thread Tadeusz Struk
This reverts rsa.c back to doing the math primitives only.
It also reverts the akcipher api changes, as the hash param
will now be passed to the rsa-pkcs1pad template.
All padding and encoding logic is moved to rsa-pkcs1pad.
software_pkey.c uses the pkcs1pad template to allocate the akcipher,
and the hash param is passed via pkcs1pad.

Signed-off-by: Tadeusz Struk 
---
 crypto/asymmetric_keys/software_pkey.c |   28 
 crypto/rsa.c   |  210 +---
 crypto/testmgr.c   |5 -
 include/crypto/akcipher.h  |7 -
 4 files changed, 56 insertions(+), 194 deletions(-)

diff --git a/crypto/asymmetric_keys/software_pkey.c b/crypto/asymmetric_keys/software_pkey.c
index 8732a41..69693fd 100644
--- a/crypto/asymmetric_keys/software_pkey.c
+++ b/crypto/asymmetric_keys/software_pkey.c
@@ -75,6 +75,9 @@ int software_pkey_verify_signature(const struct software_pkey *pkey,
struct crypto_akcipher *tfm;
struct akcipher_request *req;
struct scatterlist sig_sg, digest_sg;
+   char alg_name[CRYPTO_MAX_ALG_NAME];
+   void *output;
+   unsigned int outlen;
int ret = -ENOMEM;
 
pr_devel("==>%s()\n", __func__);
@@ -84,7 +87,11 @@ int software_pkey_verify_signature(const struct software_pkey *pkey,
BUG_ON(!sig->digest);
BUG_ON(!sig->s);
 
-   tfm = crypto_alloc_akcipher(sig->pkey_algo, 0, 0);
+   if (snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
+sig->pkey_algo, sig->hash_algo) >= CRYPTO_MAX_ALG_NAME)
+   return -EINVAL;
+
+   tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
 
@@ -96,11 +103,15 @@ int software_pkey_verify_signature(const struct software_pkey *pkey,
if (ret)
goto error_free_req;
 
+   outlen = crypto_akcipher_maxsize(tfm);
+   output = kmalloc(outlen, GFP_KERNEL);
+   if (!output)
+   goto error_free_req;
+
sg_init_one(&sig_sg, sig->s, sig->s_size);
-   sg_init_one(&digest_sg, sig->digest, sig->digest_size);
-   akcipher_request_set_crypt(req, &sig_sg, &digest_sg,
-  sig->s_size, sig->digest_size,
-  sig->hash_algo);
+   sg_init_one(&digest_sg, output, outlen);
+   akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
+  outlen);
init_completion(&compl.completion);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
  CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -112,6 +123,13 @@ int software_pkey_verify_signature(const struct software_pkey *pkey,
ret = compl.err;
}
 
+   if (!ret) {
+   if (memcmp(sig->digest, output, sig->digest_size) ||
+   req->dst_len != sig->digest_size)
+   ret = -EBADMSG;
+   }
+
+   kfree(output);
 error_free_req:
akcipher_request_free(req);
 error_free_tfm:
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 9a7c9ca..77d737f 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -16,78 +16,6 @@
 #include 
 
 /*
- * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
- */
-static const u8 rsa_digest_info_md5[] = {
-   0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
-   0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
-   0x05, 0x00, 0x04, 0x10
-};
-
-static const u8 rsa_digest_info_sha1[] = {
-   0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
-   0x2b, 0x0e, 0x03, 0x02, 0x1a,
-   0x05, 0x00, 0x04, 0x14
-};
-
-static const u8 rsa_digest_info_rmd160[] = {
-   0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
-   0x2b, 0x24, 0x03, 0x02, 0x01,
-   0x05, 0x00, 0x04, 0x14
-};
-
-static const u8 rsa_digest_info_sha224[] = {
-   0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
-   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
-   0x05, 0x00, 0x04, 0x1c
-};
-
-static const u8 rsa_digest_info_sha256[] = {
-   0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
-   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
-   0x05, 0x00, 0x04, 0x20
-};
-
-static const u8 rsa_digest_info_sha384[] = {
-   0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
-   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
-   0x05, 0x00, 0x04, 0x30
-};
-
-static const u8 rsa_digest_info_sha512[] = {
-   0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
-   0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
-   0x05, 0x00, 0x04, 0x40
-};
-
-static const struct rsa_asn1_template {
-   const char  *name;
-   const u8*data;
-   size_t  size;
-} rsa_asn1_templates[] = {
-#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
-   _(md5),
-   _(sha1),
-   _(rmd160),
-   _(sha256),
-   _(sha384),
-   _(sha512),
-   _(sha224),
-   { NULL }
-#undef _
-};
-
-static const struct 

Re: [PATCH] Re: Broken userspace crypto in linux-4.1.18

2016-02-24 Thread Greg KH
On Wed, Feb 24, 2016 at 09:54:48AM +0100, Milan Broz wrote:
> On 02/24/2016 09:32 AM, Jiri Slaby wrote:
> >> +  af_alg_release_parent(sk);
> > 
> > and this occurs to me like a double release?
> 
> yes, my copy mistake.

Which is why I want the real patches backported please.  Whenever we do
a "just this smaller patch" for a stable kernel, it is ALWAYS wrong.

Please backport the patches in a correct way so that we can apply
them...

thanks,

greg k-h


Re: [PATCH] X.509: Fix test for self-signed certificate

2016-02-24 Thread David Howells
Hi Michal,

I have the attached patch already in my queue.

David
---
commit d19fcb825912c67e09e0575b95accaa42899e07f
Author: David Howells 
Date:   Wed Feb 24 14:37:54 2016 +

X.509: Don't treat self-signed keys specially

Trust for a self-signed certificate can normally only be determined by
whether we obtained it from a trusted location (ie. it was built into the
kernel at compile time), so there's not really any point in checking it -
we could verify that the signature is valid, but it doesn't really tell us
anything if the signature checks out.

However, there's a bug in the code determining whether a certificate is
self-signed or not - if they have neither AKID nor SKID then we just assume
that the cert is self-signed, which may not be true.

Given this, remove the code that treats self-signed certs specially when it
comes to evaluating trustability and attempt to evaluate them as ordinary
signed certificates.  We then expect self-signed certificates to fail the
trustability check and be marked as untrustworthy in x509_key_preparse().

Note that there is the possibility of the trustability check on a
self-signed cert then succeeding.  This is most likely to happen when a
duplicate of the certificate is already on the trust keyring - in which
case it shouldn't be a problem.

Signed-off-by: David Howells 
Acked-by: Mimi Zohar 
cc: David Woodhouse 

diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 9e9e5a6a9ed6..fd76eca902b8 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -255,6 +255,9 @@ static int x509_validate_trust(struct x509_certificate *cert,
struct key *key;
int ret = 1;
 
+   if (!cert->akid_id || !cert->akid_skid)
+   return 1;
+
if (!trust_keyring)
return -EOPNOTSUPP;
 
@@ -312,19 +315,23 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
cert->pub->id_type = PKEY_ID_X509;
 
-   /* Check the signature on the key if it appears to be self-signed */
-   if ((!cert->akid_skid && !cert->akid_id) ||
-   asymmetric_key_id_same(cert->skid, cert->akid_skid) ||
-   asymmetric_key_id_same(cert->id, cert->akid_id)) {
-   ret = x509_check_signature(cert->pub, cert); /* self-signed */
-   if (ret < 0)
-   goto error_free_cert;
-   } else if (!prep->trusted) {
+   /* See if we can derive the trustability of this certificate.
+*
+* When it comes to self-signed certificates, we cannot evaluate
+* trustedness except by the fact that we obtained it from a trusted
+* location.  So we just rely on x509_validate_trust() failing in this
+* case.
+*
+* Note that there's a possibility of a self-signed cert matching a
+* cert that we have (most likely a duplicate that we already trust) -
+* in which case it will be marked trusted.
+*/
+   if (!prep->trusted) {
ret = x509_validate_trust(cert, get_system_trusted_keyring());
if (ret)
ret = x509_validate_trust(cert, get_ima_mok_keyring());
if (!ret)
-   prep->trusted = 1;
+   prep->trusted = true;
}
 
/* Propose a description */


Re: [PATCH] Re: Broken userspace crypto in linux-4.1.18

2016-02-24 Thread Milan Broz
On 02/24/2016 09:32 AM, Jiri Slaby wrote:
>> +af_alg_release_parent(sk);
> 
> and this occurs to me like a double release?

yes, my copy mistake.

Anyway, there should be also two more patches from series.

If it helps, I copied proper backport here (upstream commit is referenced in 
header)
https://mbroz.fedorapeople.org/tmp/4.1/

Older kernel probably need some more changes but I hope these should be trivial.

Thanks,
Milan


Re: [PATCH] Re: Broken userspace crypto in linux-4.1.18

2016-02-24 Thread Jiri Slaby
On 02/21/2016, 05:40 PM, Milan Broz wrote:
> On 02/20/2016 03:33 PM, Thomas D. wrote:
>> Hi,
>>
>> FYI: v3.10.97, v3.14.61 and 3.18.27 are also affected.
>>
>> v4.3.6 works. Looks like the patch set is only compatible with >=linux-4.3.
>>
>> v3.12.54 works because it doesn't contain the patch in question.
> 
> Hi,
> 
> indeed, because whoever backported this patchset skipped two patches
> from the series (because the skcipher interface file was introduced later).
> 
> I tried to backport it (on 4.1.18 tree, see patch below) and it fixes the 
> problem
> for me.
> 
> Anyway, it is just quick test what is the problem, patch need proper review or
> backport from someone who knows the code better.
> 
> I will release stable cryptsetup soon that will work with new kernel even 
> without
> this patch but I would strongly recommend that stable kernels get these 
> compatibility
> backports as well to avoid breaking existing userspace.
> 
> Thanks,
> Milan
> -
> 
> This patch backports missing parts of crypto API compatibility changes:
> 
>   dd504589577d8e8e70f51f997ad487a4cb6c026f
>   crypto: algif_skcipher - Require setkey before accept(2)
> 
>   a0fa2d037129a9849918a92d91b79ed6c7bd2818
>   crypto: algif_skcipher - Add nokey compatibility path
> 
> --- crypto/algif_skcipher.c.orig  2016-02-21 16:44:27.172312990 +0100
> +++ crypto/algif_skcipher.c   2016-02-21 17:03:58.555785347 +0100
...
> @@ -790,24 +912,50 @@
>   af_alg_release_parent(sk);

This,

>  }
>  
> -static int skcipher_accept_parent(void *private, struct sock *sk)
> +static void skcipher_sock_destruct(struct sock *sk)
> +{
> + skcipher_sock_destruct_common(sk);
> + af_alg_release_parent(sk);

this,

> +}
> +
> +static void skcipher_release_parent_nokey(struct sock *sk)
> +{
> + struct alg_sock *ask = alg_sk(sk);
> +
> + if (!ask->refcnt) {
> + sock_put(ask->parent);
> + return;
> + }
> +
> + af_alg_release_parent(sk);

and this occurs to me like a double release?

thanks,
-- 
js
suse labs