Re: [PATCH 10/10] crypto: tcrypt: Added speed tests for Async AEAD crypto algorithms

2015-07-06 Thread Herbert Xu
On Thu, Jul 02, 2015 at 10:48:40AM +0530, Lokesh Vutla wrote:
 Adding simple speed tests for a range of block sizes for Async AEAD crypto
 algorithms.
 
 Signed-off-by: Lokesh Vutla lokeshvu...@ti.com

What's wrong with the existing AEAD speed tests?
-- 
Email: Herbert Xu herb...@gondor.apana.org.au
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH 10/10] crypto: tcrypt: Added speed tests for Async AEAD crypto algorithms

2015-07-06 Thread Herbert Xu
On Mon, Jul 06, 2015 at 02:15:06PM +0530, Lokesh Vutla wrote:

 The existing AEAD speed test does not do a wait_for_completion() when the
 return value is -EINPROGRESS or -EBUSY, as is done for the acipher_speed
 tests.

Please fix them to do the wait.

Thanks,
-- 
Email: Herbert Xu herb...@gondor.apana.org.au
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
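
For reference, the wait being asked for here follows the same completion pattern
the acipher speed tests already use: the request callback fills in a
tcrypt_result and signals a completion, and the caller blocks on it whenever the
backend returns -EINPROGRESS or -EBUSY. The sketch below is written from memory
to illustrate that pattern; the helper name wait_async_op is invented for this
note, and only the struct/callback shape is meant to match tcrypt.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

struct tcrypt_result {
	struct completion completion;
	int err;
};

/* Request completion callback: record the status and wake the waiter. */
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;	/* backlogged request; the final callback comes later */

	res->err = err;
	complete(&res->completion);
}

/* Illustrative helper: wait until an async operation has really finished. */
static inline int wait_async_op(struct tcrypt_result *tr, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&tr->completion);
		reinit_completion(&tr->completion);
		ret = tr->err;
	}

	return ret;
}

The do_one_aead_op() helper in the patch below is one way of applying this
pattern to the AEAD speed path.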


[PATCH 10/10] crypto: tcrypt: Added speed tests for Async AEAD crypto algorithms

2015-07-01 Thread Lokesh Vutla
Adding simple speed tests for a range of block sizes for Async AEAD crypto
algorithms.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 crypto/tcrypt.c |  233 +++
 crypto/tcrypt.h |1 +
 2 files changed, 234 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1a28001..b37f3f4 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -992,6 +992,234 @@ out:
crypto_free_ahash(tfm);
 }
 
+static inline int do_one_aead_op(struct aead_request *req, int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		struct tcrypt_result *tr = req->base.data;
+
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		reinit_completion(&tr->completion);
+	}
+
+	return ret;
+}
+
+static int test_aaead_jiffies(struct aead_request *req, int enc,
+			      int blen, int sec)
+{
+	unsigned long start, end;
+	int bcount;
+	int ret;
+
+	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	     time_before(jiffies, end); bcount++) {
+		if (enc)
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+		else
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+		if (ret)
+			return ret;
+	}
+
+	pr_cont("%d operations in %d seconds (%ld bytes)\n",
+		bcount, sec, (long)bcount * blen);
+	return 0;
+}
+
+static int test_aaead_cycles(struct aead_request *req, int enc, int blen)
+{
+	unsigned long cycles = 0;
+	int ret = 0;
+	int i;
+
+	/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		if (enc)
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+		else
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+		if (enc)
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+		else
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+		end = get_cycles();
+
+		if (ret)
+			goto out;
+
+		cycles += end - start;
+	}
+
+out:
+	if (ret == 0)
+		pr_cont("1 operation in %lu cycles (%d bytes)\n",
+			(cycles + 4) / 8, blen);
+
+	return ret;
+}
+
+static void test_aaead_speed(const char *algo, int enc, unsigned int sec,
+			     struct aead_speed_template *template,
+			     unsigned int tcount, u8 authsize,
+			     unsigned int aad_size, u8 *keysize)
+{
+	unsigned int i, j;
+	struct crypto_aead *tfm;
+	int ret = -ENOMEM;
+	const char *key;
+	struct aead_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
+	struct scatterlist *sgout;
+	const char *e;
+	void *assoc;
+	char iv[MAX_IVLEN];
+	char *xbuf[XBUFSIZE];
+	char *xoutbuf[XBUFSIZE];
+	char *axbuf[XBUFSIZE];
+	unsigned int *b_size;
+	unsigned int iv_len;
+	struct tcrypt_result result;
+
+	if (enc == ENCRYPT)
+		e = "encryption";
+	else
+		e = "decryption";
+
+	if (testmgr_alloc_buf(xbuf))
+		goto out_noxbuf;
+	if (testmgr_alloc_buf(axbuf))
+		goto out_noaxbuf;
+	if (testmgr_alloc_buf(xoutbuf))
+		goto out_nooutbuf;
+
+	sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL);
+	if (!sg)
+		goto out_nosg;
+	asg = &sg[8];
+	sgout = &asg[8];
+
+	init_completion(&result.completion);
+	pr_info("\ntesting speed of %s %s\n", algo, e);
+
+	tfm = crypto_alloc_aead(algo, 0, 0);
+
+	if (IS_ERR(tfm)) {
+		pr_err("alg: aead: Failed to load transform for %s: %ld\n",
+		       algo, PTR_ERR(tfm));
+		return;
+	}
+
+	req = aead_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("alg: aead: Failed to allocate request for %s\n",
+		       algo);
+		goto out;
+	}
+
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  tcrypt_complete, &result);
+
+	i = 0;
+	do {
+		b_size = aead_sizes;
+		do {
+			assoc = axbuf[0];
+
+			if (aad_size < PAGE_SIZE) {
+				memset(assoc, 0xff, aad_size);
+			} else {
+				pr_err("associate data length