[PATCH 05/10] crypto: omap-aes: Add support for GCM mode

2015-07-01 Thread Lokesh Vutla
OMAP AES hw supports aes gcm mode.
Adding support for GCM mode in omap-aes driver.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/Makefile   |3 +-
 drivers/crypto/omap-aes-gcm.c |  304 +
 drivers/crypto/omap-aes.c |  238 +---
 drivers/crypto/omap-aes.h |  205 +++
 4 files changed, 575 insertions(+), 175 deletions(-)
 create mode 100644 drivers/crypto/omap-aes-gcm.c
 create mode 100644 drivers/crypto/omap-aes.h

diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fb84be7..3afad7b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
 n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
-obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
+omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
new file mode 100644
index 000..1be9d91
--- /dev/null
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -0,0 +1,304 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES GCM HW acceleration.
+ *
+ * Copyright (c) 2015 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include linux/err.h
+#include linux/module.h
+#include linux/init.h
+#include linux/errno.h
+#include linux/kernel.h
+#include linux/platform_device.h
+#include linux/scatterlist.h
+#include linux/dma-mapping.h
+#include linux/dmaengine.h
+#include linux/omap-dma.h
+#include linux/pm_runtime.h
+#include linux/of.h
+#include linux/of_device.h
+#include linux/of_address.h
+#include linux/io.h
+#include linux/crypto.h
+#include linux/interrupt.h
+#include crypto/scatterwalk.h
+#include crypto/aes.h
+#include omap-aes.h
+
+static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
+struct aead_request *req);
+
+static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
+{
+   struct aead_request *req = dd-aead_req;
+
+   dd-flags = ~FLAGS_BUSY;
+   dd-in_sg = NULL;
+   dd-out_sg = NULL;
+
+   req-base.complete(req-base, ret);
+}
+
+static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
+{
+   u8 *tag;
+   int alen, clen, i, ret = 0, nsg;
+
+   alen = ALIGN(dd-assoc_len, AES_BLOCK_SIZE);
+   clen = ALIGN(dd-total, AES_BLOCK_SIZE);
+
+   nsg = 1 + !!(dd-assoc_len  dd-total);
+
+   if (!dd-pio_only) {
+   dma_sync_sg_for_device(dd-dev, dd-out_sg, dd-out_sg_len,
+  DMA_FROM_DEVICE);
+   dma_unmap_sg(dd-dev, dd-in_sg, dd-in_sg_len, DMA_TO_DEVICE);
+   dma_unmap_sg(dd-dev, dd-out_sg, dd-out_sg_len,
+DMA_FROM_DEVICE);
+   omap_aes_crypt_dma_stop(dd);
+   }
+
+   if (dd-flags  FLAGS_ENCRYPT)
+   scatterwalk_map_and_copy(dd-ctx-auth_tag, dd-aead_req-dst,
+dd-total, dd-authsize, 1);
+
+   if (!(dd-flags  FLAGS_ENCRYPT)) {
+   tag = (u8 *)dd-ctx-auth_tag;
+   for (i = 0; i  dd-authsize; i++) {
+   if (tag[i]) {
+   dev_err(dd-dev, GCM decryption: Tag Message 
is wrong\n);
+   ret = -EBADMSG;
+   }
+   }
+   }
+
+   omap_aes_gcm_finish_req(dd, ret);
+   omap_aes_gcm_handle_queue(dd, NULL);
+}
+
+static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
+struct aead_request *req)
+{
+   void *buf_in;
+   int alen, clen;
+   struct crypto_aead *aead = crypto_aead_reqtfm(req);
+   unsigned int authlen = crypto_aead_authsize(aead);
+   u32 dec = !(dd-flags  FLAGS_ENCRYPT);
+
+   alen = req-assoclen;
+   clen = req-cryptlen - (dec * authlen);
+
+   dd-sgs_copied = 0;
+
+   sg_init_table(dd-in_sgl, 2);
+   buf_in = sg_virt(req-assoc);
+   sg_set_buf(dd-in_sgl, buf_in, alen);
+
+   buf_in = sg_virt(req-src);
+   sg_set_buf(dd-in_sgl[1], buf_in, clen);
+
+   dd-in_sg = dd-in_sgl;
+   dd-total = clen;
+   dd-assoc_len = req-assoclen;
+   dd-authsize = authlen;
+   dd-out_sg = req-dst;
+
+   dd-in_sg_len = scatterwalk_bytes_sglen(dd-in_sg, alen + clen);
+   dd-out_sg_len = scatterwalk_bytes_sglen(dd-out_sg, clen);
+
+   return 0;
+}
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+ 

[PATCH 10/10] crypto: tcrypt: Added speed tests for Async AEAD crypto algorithms

2015-07-01 Thread Lokesh Vutla
Adding simple speed tests for a range of block sizes for Async AEAD crypto
algorithms.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 crypto/tcrypt.c |  233 +++
 crypto/tcrypt.h |1 +
 2 files changed, 234 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1a28001..b37f3f4 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -992,6 +992,234 @@ out:
crypto_free_ahash(tfm);
 }
 
+static inline int do_one_aead_op(struct aead_request *req, int ret)
+{
+   if (ret == -EINPROGRESS || ret == -EBUSY) {
+   struct tcrypt_result *tr = req-base.data;
+
+   ret = wait_for_completion_interruptible(tr-completion);
+   if (!ret)
+   ret = tr-err;
+   reinit_completion(tr-completion);
+   }
+
+   return ret;
+}
+
+static int test_aaead_jiffies(struct aead_request *req, int enc,
+ int blen, int sec)
+{
+   unsigned long start, end;
+   int bcount;
+   int ret;
+
+   for (start = jiffies, end = start + sec * HZ, bcount = 0;
+time_before(jiffies, end); bcount++) {
+   if (enc)
+   ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+   else
+   ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+   if (ret)
+   return ret;
+   }
+
+   pr_cont(%d operations in %d seconds (%ld bytes)\n,
+   bcount, sec, (long)bcount * blen);
+   return 0;
+}
+
+static int test_aaead_cycles(struct aead_request *req, int enc, int blen)
+{
+   unsigned long cycles = 0;
+   int ret = 0;
+   int i;
+
+   /* Warm-up run. */
+   for (i = 0; i  4; i++) {
+   if (enc)
+   ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+   else
+   ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+   if (ret)
+   goto out;
+   }
+
+   /* The real thing. */
+   for (i = 0; i  8; i++) {
+   cycles_t start, end;
+
+   start = get_cycles();
+   if (enc)
+   ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+   else
+   ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+   end = get_cycles();
+
+   if (ret)
+   goto out;
+
+   cycles += end - start;
+   }
+
+out:
+   if (ret == 0)
+   pr_cont(1 operation in %lu cycles (%d bytes)\n,
+   (cycles + 4) / 8, blen);
+
+   return ret;
+}
+
+static void test_aaead_speed(const char *algo, int enc, unsigned int sec,
+struct aead_speed_template *template,
+unsigned int tcount, u8 authsize,
+unsigned int aad_size, u8 *keysize)
+{
+   unsigned int i, j;
+   struct crypto_aead *tfm;
+   int ret = -ENOMEM;
+   const char *key;
+   struct aead_request *req;
+   struct scatterlist *sg;
+   struct scatterlist *asg;
+   struct scatterlist *sgout;
+   const char *e;
+   void *assoc;
+   char iv[MAX_IVLEN];
+   char *xbuf[XBUFSIZE];
+   char *xoutbuf[XBUFSIZE];
+   char *axbuf[XBUFSIZE];
+   unsigned int *b_size;
+   unsigned int iv_len;
+   struct tcrypt_result result;
+
+   if (enc == ENCRYPT)
+   e = encryption;
+   else
+   e = decryption;
+
+   if (testmgr_alloc_buf(xbuf))
+   goto out_noxbuf;
+   if (testmgr_alloc_buf(axbuf))
+   goto out_noaxbuf;
+   if (testmgr_alloc_buf(xoutbuf))
+   goto out_nooutbuf;
+
+   sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL);
+   if (!sg)
+   goto out_nosg;
+   asg = sg[8];
+   sgout = asg[8];
+
+   init_completion(result.completion);
+   pr_info(\ntesting speed of %s %s\n, algo, e);
+
+   tfm = crypto_alloc_aead(algo, 0, 0);
+
+   if (IS_ERR(tfm)) {
+   pr_err(alg: aead: Failed to load transform for %s: %ld\n,
+  algo, PTR_ERR(tfm));
+   return;
+   }
+
+   req = aead_request_alloc(tfm, GFP_KERNEL);
+   if (!req) {
+   pr_err(alg: aead: Failed to allocate request for %s\n,
+  algo);
+   goto out;
+   }
+
+   aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, result);
+
+   i = 0;
+   do {
+   b_size = aead_sizes;
+   do {
+   assoc = axbuf[0];
+
+   if (aad_size  PAGE_SIZE) {
+   memset(assoc, 0xff, aad_size);
+   } else {
+   pr_err(associate data length 

[PATCH 03/10] crypto: aead: Add aead_request_cast() api

2015-07-01 Thread Lokesh Vutla
Add aead_request_cast() api to get pointer to aead_request
from cryto_async_request.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 include/linux/crypto.h |6 ++
 1 file changed, 6 insertions(+)

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 10df5d2..20fac3d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -1460,6 +1460,12 @@ static inline void aead_request_set_tfm(struct 
aead_request *req,
req-base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)-base);
 }
 
+static inline struct aead_request *aead_request_cast(
+   struct crypto_async_request *req)
+{
+   return container_of(req, struct aead_request, base);
+}
+
 /**
  * aead_request_alloc() - allocate request data structure
  * @tfm: cipher handle to be registered with the request
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 08/10] crypto: omap-aes: gcm: Add algo info

2015-07-01 Thread Lokesh Vutla
Now that the driver supports GCM mode, add the omap-aes-gcm
algo info to the omap-aes driver.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes.c |   22 ++
 1 file changed, 22 insertions(+)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e5e9a19..11f3850 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -789,6 +789,28 @@ static struct crypto_alg algs_ctr[] = {
.decrypt= omap_aes_ctr_decrypt,
}
 },
+{
+   .cra_name   = gcm(aes),
+   .cra_driver_name= gcm-aes-omap,
+   .cra_priority   = 100,
+   .cra_flags  = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+   .cra_blocksize  = AES_BLOCK_SIZE,
+   .cra_ctxsize= sizeof(struct omap_aes_ctx),
+   .cra_alignmask  = 0xf,
+   .cra_type   = crypto_aead_type,
+   .cra_module = THIS_MODULE,
+   .cra_init   = omap_aes_gcm_cra_init,
+   .cra_exit   = omap_aes_cra_exit,
+   .cra_u.aead = {
+   .maxauthsize= AES_BLOCK_SIZE,
+   .geniv  = eseqiv,
+   .ivsize = AES_BLOCK_SIZE,
+   .setkey = omap_aes_gcm_setkey,
+   .encrypt= omap_aes_gcm_encrypt,
+   .decrypt= omap_aes_gcm_decrypt,
+   }
+},
 };
 
 static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 01/10] crypto: omap-aes: Add support for lengths not aligned with AES_BLOCK_SIZE

2015-07-01 Thread Lokesh Vutla
OMAP AES driver returns an error if the data is not aligned with
AES_BLOCK_SIZE bytes.
But OMAP AES hw allows data input up to 1 byte aligned, but still
zeros are to be appended and complete AES_BLOCK_SIZE has to be written.
And correct length has to be passed in LENGTH field.
Adding support for inputs not aligned with AES_BLOCK_SIZE.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes.c |   33 -
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9a28b7e..a923101 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -558,6 +558,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, 
int total)
 {
int len = 0;
 
+   if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+   return -1;
+
while (sg) {
if (!IS_ALIGNED(sg-offset, 4))
return -1;
@@ -577,9 +580,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, 
int total)
 static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 {
void *buf_in, *buf_out;
-   int pages;
+   int pages, total;
 
-   pages = get_order(dd-total);
+   total = ALIGN(dd-total, AES_BLOCK_SIZE);
+   pages = get_order(total);
 
buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +598,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_copy_buf(buf_in, dd-in_sg, 0, dd-total, 0);
 
sg_init_table(dd-in_sgl, 1);
-   sg_set_buf(dd-in_sgl, buf_in, dd-total);
+   sg_set_buf(dd-in_sgl, buf_in, total);
dd-in_sg = dd-in_sgl;
 
sg_init_table(dd-out_sgl, 1);
-   sg_set_buf(dd-out_sgl, buf_out, dd-total);
+   sg_set_buf(dd-out_sgl, buf_out, total);
dd-out_sg = dd-out_sgl;
 
return 0;
@@ -611,7 +615,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct omap_aes_ctx *ctx;
struct omap_aes_reqctx *rctx;
unsigned long flags;
-   int err, ret = 0;
+   int err, ret = 0, len;
 
spin_lock_irqsave(dd-lock, flags);
if (req)
@@ -650,8 +654,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd-sgs_copied = 0;
}
 
-   dd-in_sg_len = scatterwalk_bytes_sglen(dd-in_sg, dd-total);
-   dd-out_sg_len = scatterwalk_bytes_sglen(dd-out_sg, dd-total);
+   len = ALIGN(dd-total, AES_BLOCK_SIZE);
+   dd-in_sg_len = scatterwalk_bytes_sglen(dd-in_sg, len);
+   dd-out_sg_len = scatterwalk_bytes_sglen(dd-out_sg, len);
BUG_ON(dd-in_sg_len  0 || dd-out_sg_len  0);
 
rctx = ablkcipher_request_ctx(req);
@@ -678,7 +683,7 @@ static void omap_aes_done_task(unsigned long data)
 {
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
void *buf_in, *buf_out;
-   int pages;
+   int pages, len;
 
pr_debug(enter done_task\n);
 
@@ -697,7 +702,8 @@ static void omap_aes_done_task(unsigned long data)
 
sg_copy_buf(buf_out, dd-orig_out, 0, dd-total_save, 1);
 
-   pages = get_order(dd-total_save);
+   len = ALIGN(dd-total_save, AES_BLOCK_SIZE);
+   pages = get_order(len);
free_pages((unsigned long)buf_in, pages);
free_pages((unsigned long)buf_out, pages);
}
@@ -726,11 +732,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, 
unsigned long mode)
  !!(mode  FLAGS_ENCRYPT),
  !!(mode  FLAGS_CBC));
 
-   if (!IS_ALIGNED(req-nbytes, AES_BLOCK_SIZE)) {
-   pr_err(request size is not exact amount of AES blocks\n);
-   return -EINVAL;
-   }
-
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
@@ -1046,9 +1047,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
}
}
 
-   dd-total -= AES_BLOCK_SIZE;
-
-   BUG_ON(dd-total  0);
+   dd-total -= min_t(size_t, AES_BLOCK_SIZE, dd-total);
 
/* Clear IRQ status */
status = ~AES_REG_IRQ_DATA_OUT;
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 04/10] crypto: omap-aes: Use BIT() macro

2015-07-01 Thread Lokesh Vutla
Use BIT()/GENMASK() macros for all register definitions instead of
hand-writing bit masks.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes.c |   36 ++--
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 96fc7f7..d974ab6 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -52,30 +52,30 @@
 #define AES_REG_IV(dd, x)  ((dd)-pdata-iv_ofs + ((x) * 0x04))
 
 #define AES_REG_CTRL(dd)   ((dd)-pdata-ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK(3  7)
-#define AES_REG_CTRL_CTR_WIDTH_32  (0  7)
-#define AES_REG_CTRL_CTR_WIDTH_64  (1  7)
-#define AES_REG_CTRL_CTR_WIDTH_96  (2  7)
-#define AES_REG_CTRL_CTR_WIDTH_128 (3  7)
-#define AES_REG_CTRL_CTR   (1  6)
-#define AES_REG_CTRL_CBC   (1  5)
-#define AES_REG_CTRL_KEY_SIZE  (3  3)
-#define AES_REG_CTRL_DIRECTION (1  2)
-#define AES_REG_CTRL_INPUT_READY   (1  1)
-#define AES_REG_CTRL_OUTPUT_READY  (1  0)
-#define AES_REG_CTRL_MASK  FLD_MASK(24, 2)
+#define AES_REG_CTRL_CTR_WIDTH_MASKGENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32  0
+#define AES_REG_CTRL_CTR_WIDTH_64  BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96  BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
+#define AES_REG_CTRL_CTR   BIT(6)
+#define AES_REG_CTRL_CBC   BIT(5)
+#define AES_REG_CTRL_KEY_SIZE  GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION BIT(2)
+#define AES_REG_CTRL_INPUT_READY   BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY  BIT(0)
+#define AES_REG_CTRL_MASK  GENMASK(24, 2)
 
 #define AES_REG_DATA_N(dd, x)  ((dd)-pdata-data_ofs + ((x) * 0x04))
 
 #define AES_REG_REV(dd)((dd)-pdata-rev_ofs)
 
 #define AES_REG_MASK(dd)   ((dd)-pdata-mask_ofs)
-#define AES_REG_MASK_SIDLE (1  6)
-#define AES_REG_MASK_START (1  5)
-#define AES_REG_MASK_DMA_OUT_EN(1  3)
-#define AES_REG_MASK_DMA_IN_EN (1  2)
-#define AES_REG_MASK_SOFTRESET (1  1)
-#define AES_REG_AUTOIDLE   (1  0)
+#define AES_REG_MASK_SIDLE BIT(6)
+#define AES_REG_MASK_START BIT(5)
+#define AES_REG_MASK_DMA_OUT_ENBIT(3)
+#define AES_REG_MASK_DMA_IN_EN BIT(2)
+#define AES_REG_MASK_SOFTRESET BIT(1)
+#define AES_REG_AUTOIDLE   BIT(0)
 
 #define AES_REG_LENGTH_N(x)(0x54 + ((x) * 0x04))
 
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 07/10] crypto: omap-aes: gcm: Add support for unaligned lengths

2015-07-01 Thread Lokesh Vutla
Check if the inputs are aligned; if not, process
the input before starting the hw acceleration.
Similarly, process the output after completion of hw acceleration.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes-gcm.c |   82 +
 1 file changed, 74 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 72815af..9c68ff0 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -48,8 +48,9 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, 
int ret)
 
 static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
 {
+   void *buf;
u8 *tag;
-   int alen, clen, i, ret = 0, nsg;
+   int pages, alen, clen, i, ret = 0, nsg;
 
alen = ALIGN(dd-assoc_len, AES_BLOCK_SIZE);
clen = ALIGN(dd-total, AES_BLOCK_SIZE);
@@ -65,10 +66,29 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
omap_aes_crypt_dma_stop(dd);
}
 
+   if (dd-sgs_copied  AES_OUT_DATA_COPIED) {
+   buf = sg_virt(dd-out_sgl);
+   scatterwalk_map_and_copy(buf, dd-orig_out, 0, dd-total, 1);
+
+   pages = get_order(clen);
+   free_pages((unsigned long)buf, pages);
+   }
+
if (dd-flags  FLAGS_ENCRYPT)
scatterwalk_map_and_copy(dd-ctx-auth_tag, dd-aead_req-dst,
 dd-total, dd-authsize, 1);
 
+   if (dd-sgs_copied  AES_ASSOC_DATA_COPIED) {
+   buf = sg_virt(dd-in_sgl[0]);
+   pages = get_order(alen);
+   free_pages((unsigned long)buf, pages);
+   }
+   if (dd-sgs_copied  AES_IN_DATA_COPIED) {
+   buf = sg_virt(dd-in_sgl[nsg - 1]);
+   pages = get_order(clen);
+   free_pages((unsigned long)buf, pages);
+   }
+
if (!(dd-flags  FLAGS_ENCRYPT)) {
tag = (u8 *)dd-ctx-auth_tag;
for (i = 0; i  dd-authsize; i++) {
@@ -87,13 +107,14 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev 
*dd,
 struct aead_request *req)
 {
void *buf_in;
-   int alen, clen, nsg;
+   int pages, alen, clen, cryptlen, nsg;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authlen = crypto_aead_authsize(aead);
u32 dec = !(dd-flags  FLAGS_ENCRYPT);
 
-   alen = req-assoclen;
-   clen = req-cryptlen - (dec * authlen);
+   alen = ALIGN(req-assoclen, AES_BLOCK_SIZE);
+   cryptlen = req-cryptlen - (dec * authlen);
+   clen = ALIGN(cryptlen, AES_BLOCK_SIZE);
 
dd-sgs_copied = 0;
 
@@ -101,20 +122,65 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev 
*dd,
 
sg_init_table(dd-in_sgl, nsg);
if (req-assoclen) {
-   buf_in = sg_virt(req-assoc);
+   if (omap_aes_check_aligned(req-assoc, req-assoclen)) {
+   dd-sgs_copied |= AES_ASSOC_DATA_COPIED;
+   pages = get_order(alen);
+   buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+   if (!buf_in) {
+   pr_err(Couldn't allocate for unaligncases.\n);
+   return -1;
+   }
+
+   scatterwalk_map_and_copy(buf_in, req-assoc, 0,
+req-assoclen, 0);
+   memset(buf_in + req-assoclen, 0, alen - req-assoclen);
+   } else {
+   buf_in = sg_virt(req-assoc);
+   }
sg_set_buf(dd-in_sgl, buf_in, alen);
}
 
if (req-cryptlen) {
-   buf_in = sg_virt(req-src);
+   if (omap_aes_check_aligned(req-src, req-cryptlen)) {
+   dd-sgs_copied |= AES_IN_DATA_COPIED;
+   pages = get_order(clen);
+   buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+   if (!buf_in) {
+   pr_err(Couldn't allocate for unaligncases.\n);
+   return -1;
+   }
+
+   memset(buf_in + cryptlen, 0, clen - cryptlen);
+   scatterwalk_map_and_copy(buf_in, req-src, 0, cryptlen,
+0);
+   } else {
+   buf_in = sg_virt(req-src);
+   }
sg_set_buf(dd-in_sgl[nsg - 1], buf_in, clen);
}
 
dd-in_sg = dd-in_sgl;
-   dd-total = clen;
+   dd-total = cryptlen;
dd-assoc_len = req-assoclen;
dd-authsize = authlen;
-   dd-out_sg = req-dst;
+
+   if (omap_aes_check_aligned(req-dst, cryptlen)) {
+   pages = get_order(clen);
+
+   buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+   if 

[PATCH 06/10] crypto: omap-aes: gcm: Handle inputs properly

2015-07-01 Thread Lokesh Vutla
It's not necessary that assoc data and plain text are always passed.
Add these checks before processing the input.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes-gcm.c |   26 --
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 1be9d91..72815af 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -87,7 +87,7 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
 struct aead_request *req)
 {
void *buf_in;
-   int alen, clen;
+   int alen, clen, nsg;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authlen = crypto_aead_authsize(aead);
u32 dec = !(dd-flags  FLAGS_ENCRYPT);
@@ -97,12 +97,18 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev 
*dd,
 
dd-sgs_copied = 0;
 
-   sg_init_table(dd-in_sgl, 2);
-   buf_in = sg_virt(req-assoc);
-   sg_set_buf(dd-in_sgl, buf_in, alen);
+   nsg = 1 + !!(req-assoclen  req-cryptlen);
 
-   buf_in = sg_virt(req-src);
-   sg_set_buf(dd-in_sgl[1], buf_in, clen);
+   sg_init_table(dd-in_sgl, nsg);
+   if (req-assoclen) {
+   buf_in = sg_virt(req-assoc);
+   sg_set_buf(dd-in_sgl, buf_in, alen);
+   }
+
+   if (req-cryptlen) {
+   buf_in = sg_virt(req-src);
+   sg_set_buf(dd-in_sgl[nsg - 1], buf_in, clen);
+   }
 
dd-in_sg = dd-in_sgl;
dd-total = clen;
@@ -258,6 +264,8 @@ static int omap_aes_gcm_crypt(struct aead_request *req, 
unsigned long mode)
 {
struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+   struct crypto_aead *aead = crypto_aead_reqtfm(req);
+   unsigned int authlen = crypto_aead_authsize(aead);
struct omap_aes_dev *dd;
__be32 counter = cpu_to_be32(1);
int err;
@@ -270,6 +278,12 @@ static int omap_aes_gcm_crypt(struct aead_request *req, 
unsigned long mode)
if (err)
return err;
 
+   if (req-assoclen + req-cryptlen == 0) {
+   scatterwalk_map_and_copy(ctx-auth_tag, req-dst, 0, authlen,
+1);
+   return 0;
+   }
+
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 09/10] crypto: omap-aes: gcm: Add support for PIO mode

2015-07-01 Thread Lokesh Vutla
Add support for PIO mode for GCM mode.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes-gcm.c |   10 ++
 drivers/crypto/omap-aes.c |   24 ++--
 drivers/crypto/omap-aes.h |3 ++-
 3 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9c68ff0..370891b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -52,8 +52,8 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
u8 *tag;
int pages, alen, clen, i, ret = 0, nsg;
 
-   alen = ALIGN(dd-assoc_len, AES_BLOCK_SIZE);
-   clen = ALIGN(dd-total, AES_BLOCK_SIZE);
+   alen = ALIGN(dd-assoc_len_save, AES_BLOCK_SIZE);
+   clen = ALIGN(dd-total_save, AES_BLOCK_SIZE);
 
nsg = 1 + !!(dd-assoc_len  dd-total);
 
@@ -161,7 +161,9 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev 
*dd,
 
dd-in_sg = dd-in_sgl;
dd-total = cryptlen;
+   dd-total_save = cryptlen;
dd-assoc_len = req-assoclen;
+   dd-assoc_len_save = req-assoclen;
dd-authsize = authlen;
 
if (omap_aes_check_aligned(req-dst, cryptlen)) {
@@ -248,14 +250,14 @@ static int do_encrypt_iv(struct aead_request *req, u32 
*tag)
return ret;
 }
 
-void omap_aes_gcm_dma_out_callback(void *data)
+void omap_aes_gcm_process_auth_tag(void *data)
 {
struct omap_aes_dev *dd = data;
int i, val;
u32 *auth_tag, tag[4];
 
if (!(dd-flags  FLAGS_ENCRYPT))
-   scatterwalk_map_and_copy(tag, dd-aead_req-src, dd-total,
+   scatterwalk_map_and_copy(tag, dd-aead_req-src, dd-total_save,
 dd-authsize, 0);
 
auth_tag = dd-ctx-auth_tag;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 11f3850..8aeb913 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -340,7 +340,7 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
}
 
if (dd-flags  FLAGS_GCM)
-   tx_out-callback = omap_aes_gcm_dma_out_callback;
+   tx_out-callback = omap_aes_gcm_process_auth_tag;
else
tx_out-callback = omap_aes_dma_out_callback;
tx_out-callback_param = dd;
@@ -927,8 +927,15 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
status = ~AES_REG_IRQ_DATA_IN;
omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
 
-   /* Enable DATA_OUT interrupt */
-   omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
+   /*
+* if GCM mode enable DATA_IN till assoc data is copied
+* else Enable DATA_OUT interrupt
+* */
+   if ((dd-flags  FLAGS_GCM)  dd-assoc_len)
+   dd-assoc_len -= min((size_t)AES_BLOCK_SIZE,
+dd-assoc_len);
+   else
+   omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
 
} else if (status  AES_REG_IRQ_DATA_OUT) {
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
@@ -961,12 +968,17 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
status = ~AES_REG_IRQ_DATA_OUT;
omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
 
-   if (!dd-total)
+   if (!dd-total) {
/* All bytes read! */
-   tasklet_schedule(dd-done_task);
-   else
+   if (dd-flags  FLAGS_GCM)
+   /* Process auth tag and call done_task */
+   omap_aes_gcm_process_auth_tag(dd);
+   else
+   tasklet_schedule(dd-done_task);
+   } else {
/* Enable DATA_IN interrupt for next block */
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+   }
}
 
return IRQ_HANDLED;
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0863874..e0621dd 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -164,6 +164,7 @@ struct omap_aes_dev {
size_t  total;
size_t  total_save;
size_t  assoc_len;
+   size_t  assoc_len_save;
size_t  authsize;
 
struct scatterlist  *in_sg;
@@ -199,7 +200,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req);
 int omap_aes_write_ctrl(struct omap_aes_dev *dd);
 int omap_aes_check_aligned(struct scatterlist *sg, int total);
 int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
-void omap_aes_gcm_dma_out_callback(void *data);
+void omap_aes_gcm_process_auth_tag(void *data);
 int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
 

[PATCH 02/10] crypto: omap-aes: Fix configuring of AES mode

2015-07-01 Thread Lokesh Vutla
AES_CTRL_REG is used to configure AES mode. Before configuring
any mode we need to make sure all other modes are reset or else
driver will misbehave. So mask all modes before configuring
any AES mode.

Signed-off-by: Lokesh Vutla lokeshvu...@ti.com
---
 drivers/crypto/omap-aes.c |   13 +
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a923101..96fc7f7 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -63,6 +63,7 @@
 #define AES_REG_CTRL_DIRECTION (1  2)
 #define AES_REG_CTRL_INPUT_READY   (1  1)
 #define AES_REG_CTRL_OUTPUT_READY  (1  0)
+#define AES_REG_CTRL_MASK  FLD_MASK(24, 2)
 
 #define AES_REG_DATA_N(dd, x)  ((dd)-pdata-data_ofs + ((x) * 0x04))
 
@@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
unsigned int key32;
int i, err;
-   u32 val, mask = 0;
+   u32 val;
 
err = omap_aes_hw_init(dd);
if (err)
@@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
val = FLD_VAL(((dd-ctx-keylen  3) - 1), 4, 3);
if (dd-flags  FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
-   if (dd-flags  FLAGS_CTR) {
+   if (dd-flags  FLAGS_CTR)
val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
-   mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
-   }
+
if (dd-flags  FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
 
-   mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
-   AES_REG_CTRL_KEY_SIZE;
-
-   omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+   omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
 
return 0;
 }
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 00/10] crypto: omap-aes: Add support for GCM mode

2015-07-01 Thread Lokesh Vutla
This series does some basic cleanup and adds support for
AES GCM mode for omap aes driver.
Also adds a test case for async aead algos.

Tested on BeagleBone Black: http://pastebin.ubuntu.com/11808341/

Lokesh Vutla (10):
  crypto: omap-aes: Add support for lengths not aligned with
AES_BLOCK_SIZE
  crypto: omap-aes: Fix configuring of AES mode
  crypto: aead: Add aead_request_cast() api
  crypto: omap-aes: Use BIT() macro
  crypto: omap-aes: Add support for GCM mode
  crypto: omap-aes: gcm: Handle inputs properly
  crypto: omap-aes: gcm: Add support for unaligned lengths
  crypto: omap-aes: gcm: Add algo info
  crypto: omap-aes: gcm: Add support for PIO mode
  crypto: tcrypt: Added speed tests for Async AEAD crypto algorithms

 crypto/tcrypt.c   |  233 +
 crypto/tcrypt.h   |1 +
 drivers/crypto/Makefile   |3 +-
 drivers/crypto/omap-aes-gcm.c |  386 +
 drivers/crypto/omap-aes.c |  322 +-
 drivers/crypto/omap-aes.h |  206 ++
 include/linux/crypto.h|6 +
 7 files changed, 955 insertions(+), 202 deletions(-)
 create mode 100644 drivers/crypto/omap-aes-gcm.c
 create mode 100644 drivers/crypto/omap-aes.h

-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-crypto in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html