Re: [PATCH v2 6/7] crypto: omap-aes: Add support for GCM mode

2015-07-08 Thread Lokesh Vutla
On Wednesday 08 July 2015 01:23 PM, Herbert Xu wrote:
 On Wed, Jul 08, 2015 at 03:48:05PM +0800, Herbert Xu wrote:
 On Wed, Jul 08, 2015 at 12:29:47PM +0530, Lokesh Vutla wrote:

 +	if (req->assoclen + req->cryptlen == 0) {
 +		scatterwalk_map_and_copy(ctx->auth_tag, req->dst, 0, authlen,
 +					 1);
 +		return 0;
 +	}

 How can this be right? Did you enable the selftest?
 Why not? The self-tests pass for this case.

 As per the equation given in the GCM spec [1], we can see that
 if assoclen and cryptlen are 0, then the output of GCM is just E(K, Y0),
 where Y0 = IV || (0^31)1.
 I have E(K, Y0) calculated in the previous step, and I copy it
 to the destination if assoclen and cryptlen are 0.

 Correct me if I am wrong.

 It should be E(K, Y0) ^ GHASH(0).  So unless GHASH(0) == 0, your
 code doesn't work.
 
 OK, GHASH(0) is indeed zero so I guess your code does work after
 all.
Sorry. I did not see this message and replied on the other thread.

Thanks and regards,
Lokesh
 
 Cheers,
 



Re: [PATCH v2 6/7] crypto: omap-aes: Add support for GCM mode

2015-07-08 Thread Lokesh Vutla
On Wednesday 08 July 2015 01:18 PM, Herbert Xu wrote:
 On Wed, Jul 08, 2015 at 12:29:47PM +0530, Lokesh Vutla wrote:

 +	if (req->assoclen + req->cryptlen == 0) {
 +		scatterwalk_map_and_copy(ctx->auth_tag, req->dst, 0, authlen,
 +					 1);
 +		return 0;
 +	}

 How can this be right? Did you enable the selftest?
 Why not? The self-tests pass for this case.

 As per the equation given in the GCM spec [1], we can see that
 if assoclen and cryptlen are 0, then the output of GCM is just E(K, Y0),
 where Y0 = IV || (0^31)1.
 I have E(K, Y0) calculated in the previous step, and I copy it
 to the destination if assoclen and cryptlen are 0.

 Correct me if I am wrong.
 
 It should be E(K, Y0) ^ GHASH(0).  So unless GHASH(0) == 0, your
 code doesn't work.
Yes, that's right. I have considered that.
So, we need GHASH(H, {}, {}).
As per the spec,
GHASH(H, A, C) = X(m + n + 1), and m = n = 0 in our case.

X0 = 0
X1 = (X(m + n) ^ (len(A) || len(C))) . H
   = (0 ^ 0) . H
   = 0 . H		(multiplication in GF(2^128))
   = 0

The same thing is given in Test case 1 of the spec: GHASH(H, {}, {}) = 0.
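For the curious, this is easy to cross-check from user space. The sketch below is illustrative only — it is not part of the patch and assumes OpenSSL's EVP API. It computes E(K, Y0) with raw AES-ECB and compares it against the GCM tag over empty AAD and plaintext; the two can only match because GHASH(H, {}, {}) = 0.

#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int main(void)
{
	unsigned char key[16] = { 0 }, iv[12] = { 0 };
	unsigned char y0[16], ek_y0[16], tag[16];
	int len;

	/* Y0 = IV || (0^31)1 */
	memcpy(y0, iv, 12);
	memset(y0 + 12, 0, 3);
	y0[15] = 1;

	/* E(K, Y0): one raw AES block via ECB, padding disabled */
	EVP_CIPHER_CTX *ecb = EVP_CIPHER_CTX_new();
	EVP_EncryptInit_ex(ecb, EVP_aes_128_ecb(), NULL, key, NULL);
	EVP_CIPHER_CTX_set_padding(ecb, 0);
	EVP_EncryptUpdate(ecb, ek_y0, &len, y0, 16);
	EVP_CIPHER_CTX_free(ecb);

	/* GCM tag over empty AAD and empty plaintext */
	EVP_CIPHER_CTX *gcm = EVP_CIPHER_CTX_new();
	EVP_EncryptInit_ex(gcm, EVP_aes_128_gcm(), NULL, key, iv);
	EVP_EncryptFinal_ex(gcm, tag, &len);
	EVP_CIPHER_CTX_ctrl(gcm, EVP_CTRL_GCM_GET_TAG, 16, tag);
	EVP_CIPHER_CTX_free(gcm);

	printf("tag == E(K, Y0): %s\n", memcmp(tag, ek_y0, 16) ? "no" : "yes");
	return 0;
}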

Thanks and regards,
Lokesh

 
 Cheers,
 



Re: crypto: nx - Fix reentrancy bugs

2015-07-08 Thread Leonidas S. Barbosa
 PS it would appear that you can make this completely reentrant
 by moving all the data from the tfm into the request.  Could you
 work on this and remove the spinlock completely?

Yes.

Thank you!



crypto: nx/842 - Fix context corruption

2015-07-08 Thread Herbert Xu
The transform context is shared memory and must not be written
to without locking.  This patch adds locking to nx-842 to prevent
context corruption.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au

diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
index d53a1dc..3288a70 100644
--- a/drivers/crypto/nx/nx-842-crypto.c
+++ b/drivers/crypto/nx/nx-842-crypto.c
@@ -60,6 +60,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sw842.h>
 #include <linux/ratelimit.h>
+#include <linux/spinlock.h>
 
 #include "nx-842.h"
 
@@ -125,6 +126,8 @@ static int update_param(struct nx842_crypto_param *p,
 }
 
 struct nx842_crypto_ctx {
+	spinlock_t lock;
+
 	u8 *wmem;
 	u8 *sbounce, *dbounce;
 
@@ -136,6 +139,7 @@ static int nx842_crypto_init(struct crypto_tfm *tfm)
 {
 	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	spin_lock_init(&ctx->lock);
 	ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
 	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
 	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
@@ -315,6 +319,8 @@ static int nx842_crypto_compress(struct crypto_tfm *tfm,
 			  DIV_ROUND_UP(p.iremain, c.maximum));
 	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
 
+	spin_lock_bh(&ctx->lock);
+
 	/* skip adding header if the buffers meet all constraints */
 	add_header = (p.iremain % c.multiple ||
 		      p.iremain < c.minimum ||
@@ -331,8 +337,9 @@ static int nx842_crypto_compress(struct crypto_tfm *tfm,
 
 	while (p.iremain > 0) {
 		n = hdr->groups++;
+		ret = -ENOSPC;
 		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
-			return -ENOSPC;
+			goto unlock;
 
 		/* header goes before first group */
 		h = !n && add_header ? hdrsize : 0;
@@ -342,12 +349,13 @@ static int nx842_crypto_compress(struct crypto_tfm *tfm,
 
 		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
 		if (ret)
-			return ret;
+			goto unlock;
 	}
 
 	if (!add_header && hdr->groups > 1) {
 		pr_err("Internal error: No header but multiple groups\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	/* ignore indicates the input stream needed to be padded */
@@ -358,13 +366,15 @@ static int nx842_crypto_compress(struct crypto_tfm *tfm,
 	if (add_header)
 		ret = nx842_crypto_add_header(hdr, dst);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	*dlen = p.ototal;
 
 	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
 
-	return 0;
+unlock:
+	spin_unlock_bh(&ctx->lock);
+	return ret;
 }
 
 static int decompress(struct nx842_crypto_ctx *ctx,
@@ -494,6 +504,8 @@ static int nx842_crypto_decompress(struct crypto_tfm *tfm,
 
 	hdr = (struct nx842_crypto_header *)src;
 
+	spin_lock_bh(&ctx->lock);
+
 	/* If it doesn't start with our header magic number, assume it's a raw
 	 * 842 compressed buffer and pass it directly to the hardware driver
 	 */
@@ -506,26 +518,31 @@ static int nx842_crypto_decompress(struct crypto_tfm *tfm,
 
 		ret = decompress(ctx, &p, &g, &c, 0, usehw);
 		if (ret)
-			return ret;
+			goto unlock;
 
 		*dlen = p.ototal;
 
-		return 0;
+		ret = 0;
+		goto unlock;
 	}
 
 	if (!hdr->groups) {
 		pr_err("header has no groups\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
 		pr_err("header has too many groups %x, max %x\n",
 		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-	if (hdr_len > slen)
-		return -EOVERFLOW;
+	if (hdr_len > slen) {
+		ret = -EOVERFLOW;
+		goto unlock;
+	}
 
 	memcpy(&ctx->header, src, hdr_len);
 	hdr = &ctx->header;
@@ -537,14 +554,19 @@ static int nx842_crypto_decompress(struct crypto_tfm *tfm,
 
 		ret = decompress(ctx, &p, &hdr->group[n], &c, &ignore, usehw);
 		if (ret)
-			return ret;
+			goto unlock;
 	}
 
 	*dlen = p.ototal;
 
 	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
 
-	return 0;
+	ret = 0;
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+
+	return ret;
 }
 
 static struct crypto_alg alg = {
-- 
Email: Herbert Xu herb...@gondor.apana.org.au
Home Page: http://gondor.apana.org.au/~herbert/

Re: akcipher: continuous memory for input/output

2015-07-08 Thread Herbert Xu
On Wed, Jul 08, 2015 at 05:27:15PM +0200, Stephan Mueller wrote:
 
 But according to Tadeusz it may be viable to register hybrid asym
 ciphers with the akcipher API. If there is a full blown hybrid asym
 cipher we have to handle potentially large chunks of data. I am now
 wondering whether a flat buffer still makes sense or whether the
 akcipher should be converted to scatter lists like all the other
 cipher types.
 
 If it shall stay as is, how would we be handling larger buffers?

What is a hybrid cipher and why would we want to have it in the
kernel?

Cheers,
-- 
Email: Herbert Xu herb...@gondor.apana.org.au
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


akcipher: continuous memory for input/output

2015-07-08 Thread Stephan Mueller
Hi Herbert, Tadeusz,

I looked a bit further into the akcipher code to see how we can add an AF_ALG 
there.

What currently puzzles me is the akcipher_request_set_crypt function: the 
memory for the input/output buffers is *not* a scatter list but a plain 
buffer. I can understand that for raw RSA operations, as the input/output 
is not overly large.

But according to Tadeusz it may be viable to register hybrid asym ciphers 
with the akcipher API. If there is a full blown hybrid asym cipher we have 
to handle potentially large chunks of data. I am now wondering whether a 
flat buffer still makes sense or whether the akcipher should be converted 
to scatter lists like all the other cipher types.

If it shall stay as is, how would we be handling larger buffers?

Thanks
Stephan


Re: akcipher: continuous memory for input/output

2015-07-08 Thread Stephan Mueller
On Wednesday, 8 July 2015, 23:29:32, Herbert Xu wrote:

Hi Herbert,

On Wed, Jul 08, 2015 at 05:27:15PM +0200, Stephan Mueller wrote:
 But according to Tadeusz it may be viable to register hybrid asym ciphers
 with the akcipher API. If there is a full blown hybrid asym cipher we have
 to handle potentially large chunks of data. I am now wondering whether a
 flat buffer still makes sense or whether the akcipher should be converted
 to scatter lists like all the other cipher types.
 
 If it shall stay as is, how would we be handling larger buffers?

What is a hybrid cipher and why would we want to have it in the
kernel?

Please consider the thread from June 25 between Tadeusz and me. By hybrid 
cipher I am referring to the asym + sym cipher combo that implements a real, 
complete asymmetric encryption/decryption, or to the asym + hash combo that 
implements a real signature generation/verification.

Tadeusz' rsa.c implements the raw RSA modular exponentiation operation. But 
that does not make a usable encryption or signature operation. For example, 
the kernel module signature verification illustrates that very nicely:

mod_verify_sig():
...
/* hash the code */
pks = mod_make_digest(ms.hash, mod, modlen);
...
/* RSA verify the hash */
ret = verify_signature(key, pks);

What I am wondering is whether such a hybrid operation (hash + asym or 
sym + asym) may be implemented entirely in hardware. If you tell me that this 
will never be the case, I will stop asking and be happy with the plain 
buffer. :-)

But IIRC, there are systems out there which perform a full hybrid asym 
operation. Without having checked in detail, I believe this is true for the 
IBM Crypto Express or Cavium cards.


The reason for having it in the kernel is what Tadeusz describes: the kernel 
itself does not need asym operations, but the hardware is only accessible from 
kernel space. With this, the kernel crypto API together with a yet-to-be-written 
AF_ALG akcipher interface would make the asym hardware available to user space.

Ciao
Stephan


Re: [PATCH 00/10] crypto: x86_64 - Add SSE/AVX2 ChaCha20/Poly1305 ciphers

2015-07-08 Thread Herbert Xu
On Wed, Jul 08, 2015 at 10:36:23PM +0200, Martin Willi wrote:
 
 I get less consistent numbers between runs when using sec=0,
 hence I've used sec=1. Below are the averaged numbers for the
 AEAD tests measured in cycles; I'll use cycles in the individual
 patch notes in a v2.

If you're going to use sec you need to use at least 10 in order
for it to be meaningful, as shorter values often result in bogus
numbers.  Of course sec=10 takes a long time, which is why cycles
is the preferred method.
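(For reference, an illustrative invocation — mode numbers as in
crypto/tcrypt.c: something like "modprobe tcrypt mode=211 sec=0" runs
the rfc4106/gcm AEAD speed test on the cycle-counting path, while a
non-zero sec selects the seconds-based measurement.)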

What sort of variance do you see with cycles? Do you get the
same variance for other algorithms, e.g., cbc/aes?

Thanks,
-- 
Email: Herbert Xu herb...@gondor.apana.org.au
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH 00/10] crypto: x86_64 - Add SSE/AVX2 ChaCha20/Poly1305 ciphers

2015-07-08 Thread Martin Willi
Herbert,

 Running the speed test with sec=1 makes no sense because it's
 too short.  Please use sec=0 and count cycles instead.

I get less consistent numbers between runs when using sec=0,
hence I've used sec=1. Below are the averaged numbers for the
AEAD tests measured in cycles; I'll use cycles in the individual
patch notes in a v2.

Kind regards
Martin

--

generic:
testing speed of rfc7539esp(chacha20,poly1305) (rfc7539esp(chacha20-generic,poly1305-generic)) encryption
test 0 (288 bit key, 16 byte blocks): 1 operation in 9444 cycles (16 bytes)
test 1 (288 bit key, 64 byte blocks): 1 operation in 10692 cycles (64 bytes)
test 2 (288 bit key, 256 byte blocks): 1 operation in 18299 cycles (256 bytes)
test 3 (288 bit key, 512 byte blocks): 1 operation in 26952 cycles (512 bytes)
test 4 (288 bit key, 1024 byte blocks): 1 operation in 48493 cycles (1024 bytes)
test 5 (288 bit key, 2048 byte blocks): 1 operation in 83766 cycles (2048 bytes)
test 6 (288 bit key, 4096 byte blocks): 1 operation in 150899 cycles (4096 bytes)
test 7 (288 bit key, 8192 byte blocks): 1 operation in 296779 cycles (8192 bytes)

SSE2/SSSE3:
testing speed of rfc7539esp(chacha20,poly1305) (rfc7539esp(chacha20-simd,poly1305-simd)) encryption
test 0 (288 bit key, 16 byte blocks): 1 operation in 9814 cycles (16 bytes)
test 1 (288 bit key, 64 byte blocks): 1 operation in 9998 cycles (64 bytes)
test 2 (288 bit key, 256 byte blocks): 1 operation in 12442 cycles (256 bytes)
test 3 (288 bit key, 512 byte blocks): 1 operation in 20321 cycles (512 bytes)
test 4 (288 bit key, 1024 byte blocks): 1 operation in 21098 cycles (1024 bytes)
test 5 (288 bit key, 2048 byte blocks): 1 operation in 33423 cycles (2048 bytes)
test 6 (288 bit key, 4096 byte blocks): 1 operation in 55183 cycles (4096 bytes)
test 7 (288 bit key, 8192 byte blocks): 1 operation in 102514 cycles (8192 bytes)

AVX2:
testing speed of rfc7539esp(chacha20,poly1305) (rfc7539esp(chacha20-simd,poly1305-simd)) encryption
test 0 (288 bit key, 16 byte blocks): 1 operation in 9883 cycles (16 bytes)
test 1 (288 bit key, 64 byte blocks): 1 operation in 10891 cycles (64 bytes)
test 2 (288 bit key, 256 byte blocks): 1 operation in 12467 cycles (256 bytes)
test 3 (288 bit key, 512 byte blocks): 1 operation in 13538 cycles (512 bytes)
test 4 (288 bit key, 1024 byte blocks): 1 operation in 16783 cycles (1024 bytes)
test 5 (288 bit key, 2048 byte blocks): 1 operation in 23161 cycles (2048 bytes)
test 6 (288 bit key, 4096 byte blocks): 1 operation in 37359 cycles (4096 bytes)
test 7 (288 bit key, 8192 byte blocks): 1 operation in 64670 cycles (8192 bytes)




Re: [PATCH v2 6/7] crypto: omap-aes: Add support for GCM mode

2015-07-08 Thread Lokesh Vutla
On Wednesday 08 July 2015 09:48 AM, Herbert Xu wrote:
 On Tue, Jul 07, 2015 at 09:01:48PM +0530, Lokesh Vutla wrote:

 +static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
 +				     struct aead_request *req)
 +{
 +	void *buf_in;
 +	int pages, alen, clen, cryptlen, nsg;
 +	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 +	unsigned int authlen = crypto_aead_authsize(aead);
 +	u32 dec = !(dd->flags & FLAGS_ENCRYPT);
 +	struct scatterlist *input, *assoc, tmp[2];
 +
 +	alen = ALIGN(req->assoclen, AES_BLOCK_SIZE);
 +	cryptlen = req->cryptlen - (dec * authlen);
 +	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);
 +
 +	dd->sgs_copied = 0;
 +
 +	nsg = !!(req->assoclen && req->cryptlen);
 +
 +	assoc = &req->src[0];
 +	sg_init_table(dd->in_sgl, nsg + 1);
 +	if (req->assoclen) {
 +		if (omap_aes_check_aligned(assoc, req->assoclen)) {
 +			dd->sgs_copied |= AES_ASSOC_DATA_COPIED;
 +			pages = get_order(alen);
 +			buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
 +			if (!buf_in) {
 +				pr_err("Couldn't allocate for unaligned cases.\n");
 +				return -1;
 +			}
 +
 +			scatterwalk_map_and_copy(buf_in, assoc, 0,
 +						 req->assoclen, 0);
 +			memset(buf_in + req->assoclen, 0, alen - req->assoclen);
 +		} else {
 +			buf_in = sg_virt(req->assoc);
 
 req->assoc is now obsolete. Did you test this code?
Sorry, I missed it. I'll update.

 
 +static int do_encrypt_iv(struct aead_request *req, u32 *tag)
 +{
 +	struct scatterlist iv_sg;
 +	struct ablkcipher_request *ablk_req;
 +	struct crypto_ablkcipher *tfm;
 +	struct tcrypt_result result;
 +	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 +	int ret = 0;
 +
 +	tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
 
 Ugh, you cannot allocate crypto transforms in the data path.  You
 should allocate it in init instead.  Also using ctr(aes) is overkill.
 Just use aes and do the xor by hand.
I'll take care of this.
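For reference, a rough sketch of that suggestion (illustrative only, not
the actual driver change; it assumes a bare "aes" crypto_cipher allocated
once with crypto_alloc_cipher("aes", 0, 0) in the init path and keyed
during setkey):

static void omap_aes_gcm_encrypt_iv(struct crypto_cipher *aes,
				    const u8 *iv, u8 *tag)
{
	u8 y0[AES_BLOCK_SIZE];

	/* Y0 = IV || (0^31)1 */
	memcpy(y0, iv, 12);
	y0[12] = y0[13] = y0[14] = 0;
	y0[15] = 1;

	/* tag = E(K, Y0): one raw AES block, no ctr(aes) request needed */
	crypto_cipher_encrypt_one(aes, tag, y0);
}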
 
 +static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
 +{
 +	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 +	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
 +	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 +	unsigned int authlen = crypto_aead_authsize(aead);
 +	struct omap_aes_dev *dd;
 +	__be32 counter = cpu_to_be32(1);
 +	int err;
 +
 +	memset(ctx->auth_tag, 0, sizeof(ctx->auth_tag));
 
 The ctx is shared memory and you must not write to it as multiple
 requests can be called on the same tfm.  Use rctx instead.
 
 +	memcpy(req->iv + 12, &counter, 4);
 
 The IV is only 12 bytes long so you're corrupting memory here.
 You should use rctx here too.
OK, I'll use rctx. Thanks for pointing that out.

 
 +	if (req->assoclen + req->cryptlen == 0) {
 +		scatterwalk_map_and_copy(ctx->auth_tag, req->dst, 0, authlen,
 +					 1);
 +		return 0;
 +	}
 
 How can this be right? Did you enable the selftest?
Why not? The self-tests pass for this case.

As per the equation given in the GCM spec [1], we can see that
if assoclen and cryptlen are 0, then the output of GCM is just E(K, Y0),
where Y0 = IV || (0^31)1.
I have E(K, Y0) calculated in the previous step, and I copy it
to the destination if assoclen and cryptlen are 0.

Correct me if I am wrong.

Thanks and regards,
Lokesh

[1] 
http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf

 
 Cheers,
 



Re: [PATCH v2 0/7] crypto: omap-aes: Add support for GCM mode

2015-07-08 Thread Herbert Xu
On Tue, Jul 07, 2015 at 09:01:42PM +0530, Lokesh Vutla wrote:
 This series does some basic cleanup and adds support for
 AES GCM mode for omap aes driver.
 
 Changes since v1:
 - Switched GCM to new AEAD interface

Patches 1-4 and 7 applied.

Cheers,
-- 
Email: Herbert Xu herb...@gondor.apana.org.au
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH v2 5/7] crypto: aead: Add aead_request_cast() api

2015-07-08 Thread Lokesh Vutla
On Wednesday 08 July 2015 09:26 AM, Herbert Xu wrote:
 On Tue, Jul 07, 2015 at 09:01:47PM +0530, Lokesh Vutla wrote:
 Add an aead_request_cast() API to get a pointer to the aead_request
 from a crypto_async_request.

 Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
 ---
  include/crypto/internal/aead.h | 6 ++
  1 file changed, 6 insertions(+)

 diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
 index 4b25471..0423fa5 100644
 --- a/include/crypto/internal/aead.h
 +++ b/include/crypto/internal/aead.h
 @@ -157,6 +157,12 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
  	return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
  }
  
 +static inline struct aead_request *aead_request_cast(
 +	struct crypto_async_request *req)
 +{
 +	return container_of(req, struct aead_request, base);
 +}
 
 Please drop this and use the aead_queue interface I just posted.
Okay, I'll update it in the next version.

Thanks and regards,
Lokesh
 
 Thanks,
 



[PATCH 13/14] crypto: caam - Use new IV convention

2015-07-08 Thread Herbert Xu
This patch converts rfc4106 to the new calling convention where
the IV is now part of the AD and needs to be skipped.
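(Illustrative layout, not text from the patch: for rfc4106 with an 8-byte
explicit IV, the request buffer changes roughly as follows, which is why
req->assoclen grows from 8/12 to 16/20 and the descriptor reads
assoclen - 8 bytes of AAD before skipping the 8 IV bytes.

    old: req->src = [ AAD (assoclen = 8/12) ][ payload ]    IV in req->iv
    new: req->src = [ AAD ][ IV (8 bytes) ][ payload ]      assoclen = 16/20)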

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 drivers/crypto/caam/caamalg.c |   75 +++---
 1 file changed, 49 insertions(+), 26 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index daca933..3c50a50 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -87,8 +87,8 @@
 #define DESC_GCM_DEC_LEN   (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
 
 #define DESC_RFC4106_BASE  (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN   (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN   (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN   (DESC_RFC4106_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN   (DESC_RFC4106_BASE + 12 * CAAM_CMD_SZ)
 
 #define DESC_RFC4543_BASE  (3 * CAAM_CMD_SZ)
 #define DESC_RFC4543_ENC_LEN   (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
@@ -976,29 +976,32 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-   append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+   append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-   /* Skip assoc data */
-   append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
 
-   /* cryptlen = seqoutlen - assoclen */
-   append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+   /* Skip IV */
+   append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 
-   /* Write encrypted data */
-   append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
/* Read payload data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
 
+   /* Skip assoc data */
+   append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+   /* cryptlen = seqoutlen - assoclen */
+   append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+   /* Write encrypted data */
+   append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
@@ -1044,29 +1047,32 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-   append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+   append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-   /* Skip assoc data */
-   append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
 
-   /* Will write cryptlen bytes */
-   append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+   /* Skip IV */
+   append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
/* Will read cryptlen bytes */
-   append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-   /* Store payload data */
-   append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+   append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
 
/* Read encrypted data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
 
+   /* Skip assoc data */
+   append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+   /* Will write cryptlen bytes */
+   append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+   /* Store payload data */
+   append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
@@ -2685,6 +2691,14 @@ static int gcm_encrypt(struct aead_request *req)
return ret;
 }
 
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return 

[PATCH 6/14] crypto: seqiv - Replace seqniv with seqiv

2015-07-08 Thread Herbert Xu
This patch replaces the seqniv generator with seqiv when the
underlying algorithm understands the new calling convention.

This not only makes more sense, as seqiv is now solely responsible
for IV generation rather than also determining how the IV is going
to be used; it also allows for optimisations in the underlying
implementation.  For example, the space for the IV could be used
to add padding for authentication.

This patch also removes the unnecessary copying of IV to dst
during seqiv decryption as the IV is part of the AD and not cipher
text.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/seqiv.c |   34 +++---
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 122c56e..45d0563 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -467,9 +467,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
	aead_request_set_ad(subreq, req->assoclen + ivsize);
 
	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-	if (req->src != req->dst)
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 req->assoclen, ivsize, 1);
 
	return crypto_aead_decrypt(subreq);
 }
@@ -516,9 +513,9 @@ static int seqiv_old_aead_init(struct crypto_tfm *tfm)
return err ?: aead_geniv_init(tfm);
 }
 
-static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
+static int seqiv_aead_init_common(struct crypto_aead *geniv,
+ unsigned int reqsize)
 {
-   struct crypto_aead *geniv = __crypto_aead_cast(tfm);
struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
int err;
 
@@ -541,7 +538,7 @@ static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
	if (IS_ERR(ctx->null))
		goto out;
 
-   err = aead_geniv_init(tfm);
+   err = aead_geniv_init(crypto_aead_tfm(geniv));
if (err)
goto drop_null;
 
@@ -556,19 +553,19 @@ drop_null:
goto out;
 }
 
-static int seqiv_aead_init(struct crypto_tfm *tfm)
+static int seqiv_aead_init(struct crypto_aead *tfm)
 {
return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
 }
 
-static int seqniv_aead_init(struct crypto_tfm *tfm)
+static int seqniv_aead_init(struct crypto_aead *tfm)
 {
return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
 }
 
-static void seqiv_aead_exit(struct crypto_tfm *tfm)
+static void seqiv_aead_exit(struct crypto_aead *tfm)
 {
-	struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
@@ -666,11 +663,11 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
	inst->alg.encrypt = seqiv_aead_encrypt;
	inst->alg.decrypt = seqiv_aead_decrypt;
 
-	inst->alg.base.cra_init = seqiv_aead_init;
-	inst->alg.base.cra_exit = seqiv_aead_exit;
+	inst->alg.init = seqiv_aead_init;
+	inst->alg.exit = seqiv_aead_exit;
 
	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
+	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
 done:
	err = aead_register_instance(tmpl, inst);
@@ -727,8 +724,15 @@ static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
	inst->alg.encrypt = seqniv_aead_encrypt;
	inst->alg.decrypt = seqniv_aead_decrypt;
 
-	inst->alg.base.cra_init = seqniv_aead_init;
-	inst->alg.base.cra_exit = seqiv_aead_exit;
+	inst->alg.init = seqniv_aead_init;
+	inst->alg.exit = seqiv_aead_exit;
+
+	if ((alg->base.cra_flags & CRYPTO_ALG_AEAD_NEW)) {
+		inst->alg.encrypt = seqiv_aead_encrypt;
+		inst->alg.decrypt = seqiv_aead_decrypt;
+
+		inst->alg.init = seqiv_aead_init;
+	}
 
	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);


[PATCH 4/14] crypto: cryptd - Propagate new AEAD implementation flag

2015-07-08 Thread Herbert Xu
This patch allows the CRYPTO_ALG_AEAD_NEW flag to be propagated.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/cryptd.c |   11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 2f833dc..360ee85 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -176,10 +176,9 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;
-	if ((algt->type & CRYPTO_ALG_INTERNAL))
-		*type |= CRYPTO_ALG_INTERNAL;
-	if ((algt->mask & CRYPTO_ALG_INTERNAL))
-		*mask |= CRYPTO_ALG_INTERNAL;
+
+	*type |= algt->type & (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_AEAD_NEW);
+	*mask |= algt->mask & (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_AEAD_NEW);
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -806,7 +805,9 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
		goto out_drop_aead;
 
	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
-				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+				   (alg->base.cra_flags &
+				    (CRYPTO_ALG_INTERNAL |
+				     CRYPTO_ALG_AEAD_NEW));
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 
	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);


[PATCH 8/14] crypto: testmgr - Disable rfc4106 test and convert test vectors

2015-07-08 Thread Herbert Xu
This patch disables the rfc4106 test while the conversion to the
new seqiv calling convention takes place.  It also converts the
rfc4106 test vectors to the new format.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/testmgr.c |2 
 crypto/testmgr.h |  602 +--
 2 files changed, 319 insertions(+), 285 deletions(-)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d0a42bd..c4fe6a8 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3492,7 +3492,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
-	.alg = "rfc4106(gcm(aes))",
+	.alg = "rfc4106(gcm(aes))-disabled",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b052555..c0c02436 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -20135,149 +20135,150 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
 };
 
 static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
-    { /* Generated using Crypto++ */
+	{ /* Generated using Crypto++ */
		.key	= zeroed_string,
		.klen	= 20,
-        .iv     = zeroed_string,
-        .input  = zeroed_string,
-        .ilen   = 16,
-        .assoc  = zeroed_string,
-        .alen   = 8,
+		.iv	= zeroed_string,
+		.input	= zeroed_string,
+		.ilen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                  "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                  "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                  "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+			  "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+			  "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+			  "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
		.rlen	= 32,
-    },{
+	}, {
		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                  "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
		.klen	= 20,
-        .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                  "\x00\x00\x00\x00",
-        .input  = zeroed_string,
-        .ilen   = 16,
-        .assoc  = zeroed_string,
-        .alen   = 8,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= zeroed_string,
+		.ilen	= 16,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                  "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                  "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                  "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+			  "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+			  "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+			  "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
		.rlen	= 32,
 
-    }, {
+	}, {
		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                  "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
		.klen	= 20,
-        .iv     = zeroed_string,
-        .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                  "\x01\x01\x01\x01\x01\x01\x01\x01",
-        .ilen   = 16,
-        .assoc  = zeroed_string,
-        .alen   = 8,
+		.iv	= zeroed_string,
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                  "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                  "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+			  "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
		.rlen	= 32,
-    }, {
+	}, {
		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                  "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
		.klen	= 20,
-        .iv     = zeroed_string,
-        .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"

[PATCH 7/14] crypto: aead - Propagate new AEAD implementation flag for IV generators

2015-07-08 Thread Herbert Xu
This patch allows the CRYPTO_ALG_AEAD_NEW flag to be propagated.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/aead.c |5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/crypto/aead.c b/crypto/aead.c
index 8cd45a7..1a5b118 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -604,7 +604,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
		return ERR_CAST(algt);
 
	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
-	    algt->mask)
+	    algt->mask & ~CRYPTO_ALG_AEAD_NEW)
		return ERR_PTR(-EINVAL);
 
	name = crypto_attr_alg_name(tb[1]);
@@ -683,7 +683,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
	    CRYPTO_MAX_ALG_NAME)
		goto err_drop_alg;
 
-	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags = alg->base.cra_flags &
+				   (CRYPTO_ALG_ASYNC | CRYPTO_ALG_AEAD_NEW);
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;


[PATCH 2/14] crypto: aead - Add type-safe function for freeing instances

2015-07-08 Thread Herbert Xu
This patch adds a type-safe function for freeing AEAD instances
to struct aead_instance.  This replaces the existing free function
in struct crypto_template which does not know the type of the
instance that it's freeing.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/aead.c  |   13 +
 include/crypto/internal/aead.h |1 +
 2 files changed, 14 insertions(+)

diff --git a/crypto/aead.c b/crypto/aead.c
index 07bf997..8cd45a7 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -307,9 +307,22 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
	seq_printf(m, "geniv: <none>\n");
 }
 
+static void crypto_aead_free_instance(struct crypto_instance *inst)
+{
+	struct aead_instance *aead = aead_instance(inst);
+
+	if (!aead->free) {
+		inst->tmpl->free(inst);
+		return;
+	}
+
+	aead->free(aead);
+}
+
 static const struct crypto_type crypto_new_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
+   .free = crypto_aead_free_instance,
 #ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
 #endif
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index c3942f4..a292e96 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -21,6 +21,7 @@
 struct rtattr;
 
 struct aead_instance {
+   void (*free)(struct aead_instance *inst);
union {
struct {
char head[offsetof(struct aead_alg, base)];


[PATCH 11/14] crypto: gcm - Use new IV convention

2015-07-08 Thread Herbert Xu
This patch converts rfc4106 to the new calling convention where
the IV is now part of the AD and needs to be skipped.  This patch
also makes use of the new type-safe way of freeing instances.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/gcm.c |  114 +++
 1 file changed, 77 insertions(+), 37 deletions(-)

diff --git a/crypto/gcm.c b/crypto/gcm.c
index 7d32d47..0c9e33b 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -38,6 +38,12 @@ struct crypto_rfc4106_ctx {
u8 nonce[4];
 };
 
+struct crypto_rfc4106_req_ctx {
+   struct scatterlist src[3];
+   struct scatterlist dst[3];
+   struct aead_request subreq;
+};
+
 struct crypto_rfc4543_instance_ctx {
struct crypto_aead_spawn aead;
 };
@@ -601,6 +607,15 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
	crypto_free_ablkcipher(ctx->ctr);
 }
 
+static void crypto_gcm_free(struct aead_instance *inst)
+{
+   struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->ctr);
+	crypto_drop_ahash(&ctx->ghash);
+   kfree(inst);
+}
+
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *full_name,
@@ -619,7 +634,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
	if (IS_ERR(algt))
		return PTR_ERR(algt);
 
-	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_AEAD_NEW)) &
+	    algt->mask)
		return -EINVAL;
 
	ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
@@ -674,6 +690,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 
	inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) &
				   CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags |= CRYPTO_ALG_AEAD_NEW;
	inst->alg.base.cra_priority = (ghash->base.cra_priority +
				       ctr->cra_priority) / 2;
	inst->alg.base.cra_blocksize = 1;
@@ -689,6 +706,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
	inst->alg.encrypt = crypto_gcm_encrypt;
	inst->alg.decrypt = crypto_gcm_decrypt;
 
+	inst->free = crypto_gcm_free;
+
err = aead_register_instance(tmpl, inst);
if (err)
goto out_put_ctr;
@@ -728,19 +747,9 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
					ctr_name, "ghash");
 }
 
-static void crypto_gcm_free(struct crypto_instance *inst)
-{
-	struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->ctr);
-	crypto_drop_ahash(&ctx->ghash);
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_gcm_tmpl = {
	.name = "gcm",
	.create = crypto_gcm_create,
-	.free = crypto_gcm_free,
	.module = THIS_MODULE,
 };
 
@@ -770,7 +779,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 static struct crypto_template crypto_gcm_base_tmpl = {
	.name = "gcm_base",
	.create = crypto_gcm_base_create,
-	.free = crypto_gcm_free,
	.module = THIS_MODULE,
 };
 
@@ -816,27 +824,50 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
 
 static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 {
-	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_request *subreq = &rctx->subreq;
	struct crypto_aead *child = ctx->child;
+	struct scatterlist *sg;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);
 
+	scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+
	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);
 
+	sg_init_table(rctx->src, 3);
+	sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+	if (sg != rctx->src + 1)
+		sg_chain(rctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 3);
+		sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+		if (sg != rctx->dst + 1)
+			sg_chain(rctx->dst, 2, sg);
+	}
+
	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
-	aead_request_set_crypt(subreq, req->src, 

[PATCH 3/14] crypto: pcrypt - Propagate new AEAD implementation flag

2015-07-08 Thread Herbert Xu
This patch allows the CRYPTO_ALG_AEAD_NEW flag to be propagated.

It also restores the ASYNC bit that went missing during the AEAD
conversion.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/pcrypt.c |   12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 45e7d51..001a3a3 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -274,11 +274,16 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
 {
struct pcrypt_instance_ctx *ctx;
+   struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
const char *name;
int err;
 
+   algt = crypto_get_attr_type(tb);
+   if (IS_ERR(algt))
+   return PTR_ERR(algt);
+
name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -290,7 +295,9 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, 
struct rtattr **tb,
	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));
 
-	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
+	err = crypto_grab_aead(&ctx->spawn, name,
+			       algt->type & CRYPTO_ALG_AEAD_NEW,
+			       algt->mask & CRYPTO_ALG_AEAD_NEW);
if (err)
goto out_free_inst;
 
@@ -299,6 +306,9 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
	if (err)
		goto out_drop_aead;
 
+	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_AEAD_NEW;
+
	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 


[PATCH 10/14] crypto: aesni - Use new IV convention

2015-07-08 Thread Herbert Xu
This patch converts rfc4106 to the new calling convention where
the IV is now in the AD and needs to be skipped.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 arch/x86/crypto/aesni-intel_glue.c |   56 +
 1 file changed, 20 insertions(+), 36 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index dccad38..2347ef0 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead)
		return PTR_ERR(cryptd_tfm);
 
	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(
-		aead,
-		sizeof(struct aead_request) +
-		crypto_aead_reqsize(&cryptd_tfm->base));
+	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
 }
 
@@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
-	/* to 8 or 12 bytes */
-	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+	/* to 16 or 20 bytes */
+	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;
 
	/* IV below built */
@@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
}
 
	kernel_fpu_begin();
-	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
-		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
-		+ ((unsigned long)req->cryptlen), auth_tag_len);
+	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
+			  ctx->hash_subkey, assoc, req->assoclen - 8,
+			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();
 
/* The authTag (aka the Integrity Check Value) needs to be written
@@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
	struct scatter_walk dst_sg_walk;
	unsigned int i;
 
-	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;
 
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
-	/* equal to 8 or 12 bytes */
+	/* equal to 16 or 20 bytes */
 
	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
@@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 
	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
-		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
-		authTag, auth_tag_len);
+			  ctx->hash_subkey, assoc, req->assoclen - 8,
+			  authTag, auth_tag_len);
	kernel_fpu_end();
 
/* Compare generated tag with passed in tag. */
@@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req)
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
-	struct aead_request *subreq = aead_request_ctx(req);
 
-	aead_request_set_tfm(subreq, irq_fpu_usable() ?
-			     cryptd_aead_child(cryptd_tfm) :
-			     &cryptd_tfm->base);
+	aead_request_set_tfm(req, irq_fpu_usable() ?
+				  cryptd_aead_child(cryptd_tfm) :
+				  &cryptd_tfm->base);
 
-	aead_request_set_callback(subreq, req->base.flags,
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst,
-			       req->cryptlen, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	return crypto_aead_encrypt(subreq);
+	return crypto_aead_encrypt(req);
 }
 
 static int rfc4106_decrypt(struct aead_request *req)
@@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req)
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
-	struct aead_request *subreq = aead_request_ctx(req);
-
-	aead_request_set_tfm(subreq, irq_fpu_usable() ?
-			     cryptd_aead_child(cryptd_tfm) :
-			     &cryptd_tfm->base);
 
-	aead_request_set_callback(subreq, req->base.flags,
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst,
-			       req->cryptlen, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
+	aead_request_set_tfm(req, 

[PATCH 5/14] crypto: echainiv - Fix encryption convention

2015-07-08 Thread Herbert Xu
This patch fixes a bug where we were incorrectly including the
IV in the AD during encryption.  The IV must remain in the plain
text for it to be encrypted.

During decryption there is no need to copy the IV to dst because
it's now part of the AD.

This patch removes an unnecessary check on authsize which would be
performed by the underlying decrypt call.

Finally this patch makes use of the type-safe init/exit functions.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/echainiv.c |   24 +++-
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index b6e43dc..d3896c7 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -145,8 +145,8 @@ static int echainiv_encrypt(struct aead_request *req)
 
	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
-			       req->cryptlen - ivsize, info);
-	aead_request_set_ad(subreq, req->assoclen + ivsize);
+			       req->cryptlen, info);
+	aead_request_set_ad(subreq, req->assoclen);
 
	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
@@ -166,7 +166,7 @@ static int echainiv_decrypt(struct aead_request *req)
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
 
-	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+	if (req->cryptlen < ivsize)
		return -EINVAL;
 
	aead_request_set_tfm(subreq, ctx->geniv.child);
@@ -180,16 +180,12 @@ static int echainiv_decrypt(struct aead_request *req)
	aead_request_set_ad(subreq, req->assoclen + ivsize);
 
	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-	if (req->src != req->dst)
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 req->assoclen, ivsize, 1);
 
	return crypto_aead_decrypt(subreq);
 }
 
-static int echainiv_init(struct crypto_tfm *tfm)
+static int echainiv_init(struct crypto_aead *geniv)
 {
-   struct crypto_aead *geniv = __crypto_aead_cast(tfm);
struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
int err;
 
@@ -212,7 +208,7 @@ static int echainiv_init(struct crypto_tfm *tfm)
	if (IS_ERR(ctx->null))
goto out;
 
-   err = aead_geniv_init(tfm);
+   err = aead_geniv_init(crypto_aead_tfm(geniv));
if (err)
goto drop_null;
 
@@ -227,9 +223,9 @@ drop_null:
goto out;
 }
 
-static void echainiv_exit(struct crypto_tfm *tfm)
+static void echainiv_exit(struct crypto_aead *tfm)
 {
-	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);
 
	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
crypto_put_default_null_skcipher();
@@ -262,13 +258,15 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;
 
-	inst->alg.base.cra_init = echainiv_init;
-	inst->alg.base.cra_exit = echainiv_exit;
+	inst->alg.init = echainiv_init;
+	inst->alg.exit = echainiv_exit;
 
	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
+	inst->free = aead_geniv_free;
+
 done:
err = aead_register_instance(tmpl, inst);
if (err)


[PATCH 12/14] crypto: nx - Use new IV convention

2015-07-08 Thread Herbert Xu
This patch converts rfc4106 to the new calling convention where
the IV is now part of the AD and needs to be skipped.  This patch
also makes use of type-safe AEAD functions where possible.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 drivers/crypto/nx/nx-aes-gcm.c |   66 -
 1 file changed, 40 insertions(+), 26 deletions(-)

diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 92c993f..5719638 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,11 +21,9 @@
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <asm/vio.h>
 
 #include "nx_csbcpb.h"
@@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 
@@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;
 
@@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
 
 static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
-		  u8                    *out)
+		  u8                    *out,
+		  unsigned int           assoclen)
 {
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
-	unsigned int nbytes = req->assoclen;
+	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;
 
@@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
 
		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
		processed += to_process;
	} while (processed < nbytes);
@@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
return rc;
 }
 
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+		unsigned int assoclen)
 {
	int rc;
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
-	unsigned int nbytes = req->assoclen;
+	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;
 
@@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
		processed += to_process;
	} while (processed < nbytes);
@@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
 {
	int rc;
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
@@ -314,9 +316,11 @@ out:
return rc;
 }
 
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+			    unsigned int assoclen)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
@@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
	if (nbytes == 0) {
-		if (req->assoclen == 0)
+ 

[PATCH 1/14] crypto: api - Add instance free function to crypto_type

2015-07-08 Thread Herbert Xu
Currently the task of freeing an instance is given to the crypto
template.  However, it has no type information on the instance so
we have to resort to checking type information at runtime.

This patch introduces a free function to crypto_type that will be
used to free an instance.  This can then be used to free an instance
in a type-safe manner.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/algapi.c |   14 --
 include/crypto/algapi.h |2 ++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/crypto/algapi.c b/crypto/algapi.c
index ceebfcf..d130b41 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -67,12 +67,22 @@ static int crypto_check_alg(struct crypto_alg *alg)
return crypto_set_driver_name(alg);
 }
 
+static void crypto_free_instance(struct crypto_instance *inst)
+{
+	if (!inst->alg.cra_type->free) {
+		inst->tmpl->free(inst);
+		return;
+	}
+
+	inst->alg.cra_type->free(inst);
+}
+
 static void crypto_destroy_instance(struct crypto_alg *alg)
 {
	struct crypto_instance *inst = (void *)alg;
	struct crypto_template *tmpl = inst->tmpl;
 
-	tmpl->free(inst);
+	crypto_free_instance(inst);
	crypto_tmpl_put(tmpl);
 }
 
@@ -481,7 +491,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 
	hlist_for_each_entry_safe(inst, n, list, list) {
		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
-		tmpl->free(inst);
+		crypto_free_instance(inst);
	}
	crypto_remove_final(&users);
 }
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 9041a84..c9fe145 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 
 struct crypto_aead;
+struct crypto_instance;
 struct module;
 struct rtattr;
 struct seq_file;
@@ -30,6 +31,7 @@ struct crypto_type {
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+   void (*free)(struct crypto_instance *inst);
 
unsigned int type;
unsigned int maskclear;


[PATCH 9/14] crypto: tcrypt - Add support for new IV convention

2015-07-08 Thread Herbert Xu
This patch allows the AEAD speed tests to cope with the new seqiv
calling convention as well as the old one.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/tcrypt.c |   11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 3603c7c..73ed4f2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -344,7 +344,12 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
		goto out_nosg;
	sgout = &sg[9];
 
-	tfm = crypto_alloc_aead(algo, 0, 0);
+	tfm = crypto_alloc_aead(algo, CRYPTO_ALG_AEAD_NEW,
+				CRYPTO_ALG_AEAD_NEW);
+	if (PTR_ERR(tfm) == -ENOENT) {
+		aad_size -= 8;
+		tfm = crypto_alloc_aead(algo, 0, CRYPTO_ALG_AEAD_NEW);
+	}
 
	if (IS_ERR(tfm)) {
		pr_err("alg: aead: Failed to load transform for %s: %ld\n",
		       algo,
@@ -1778,14 +1783,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 
case 211:
		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_20);
+				NULL, 0, 16, 16, aead_speed_template_20);
		test_aead_speed("gcm(aes)", ENCRYPT, sec,
				NULL, 0, 16, 8, aead_speed_template_20);
		break;
 
	case 212:
		test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_19);
+				NULL, 0, 16, 16, aead_speed_template_19);
		break;
break;
 
case 300:


[PATCH 14/14] crypto: testmgr - Reenable rfc4106 test

2015-07-08 Thread Herbert Xu
Now that all implementations of rfc4106 have been converted we can
reenable the test.

Signed-off-by: Herbert Xu herb...@gondor.apana.org.au
---

 crypto/testmgr.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c4fe6a8..d0a42bd 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3492,7 +3492,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
-	.alg = "rfc4106(gcm(aes))-disabled",
+	.alg = "rfc4106(gcm(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {