[PATCH 5/5] crypto: caam/qi2 - add support for Chacha20 + Poly1305

2018-11-08 Thread Horia Geantă
Add support for Chacha20 + Poly1305 combined AEAD:
-generic (rfc7539)
-IPsec (rfc7634 - known as rfc7539esp in the kernel)
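
For orientation, a minimal kernel-side sketch of how the generic rfc7539(chacha20,poly1305) AEAD added here is exercised through the crypto API; apart from the transform name, the 32-byte key, 12-byte nonce and 16-byte tag, everything below (buffer sizes, flags, error handling) is illustrative only:

#include <crypto/aead.h>
#include <linux/scatterlist.h>

/* Encrypt 64 bytes of payload with 16 bytes of AAD, in place. */
static int chachapoly_demo(void)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        struct scatterlist sg;
        u8 key[32] = { 0 };             /* 256-bit ChaCha20 key */
        u8 iv[12] = { 0 };              /* 96-bit RFC 7539 nonce */
        u8 buf[16 + 64 + 16] = { 0 };   /* AAD | plaintext | room for tag */
        int ret;

        tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_aead_setkey(tfm, key, sizeof(key));
        if (!ret)
                ret = crypto_aead_setauthsize(tfm, 16); /* Poly1305 tag */

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_aead(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, buf, sizeof(buf));
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, 16);                   /* AAD length */
        aead_request_set_crypt(req, &sg, &sg, 64, iv);  /* plaintext length */
        if (!ret)
                ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

        aead_request_free(req);
        crypto_free_aead(tfm);
        return ret;
}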

Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg.c  |   4 +-
 drivers/crypto/caam/caamalg_desc.c |  24 ++-
 drivers/crypto/caam/caamalg_desc.h |   3 +-
 drivers/crypto/caam/caamalg_qi2.c  | 129 -
 4 files changed, 154 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index cbaeb264a261..523565ce0060 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -527,13 +527,13 @@ static int chachapoly_set_sh_desc(struct crypto_aead 
*aead)
 
desc = ctx->sh_desc_enc;
cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
-  ctx->authsize, true);
+  ctx->authsize, true, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
desc = ctx->sh_desc_dec;
cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
-  ctx->authsize, false);
+  ctx->authsize, false, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index 0eb2add7e4e2..7db1640d3577 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1227,10 +1227,12 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
  * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
  * @encap: true if encapsulation, false if decapsulation
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
-   unsigned int icvsize, const bool encap)
+   unsigned int icvsize, const bool encap,
+   const bool is_qi)
 {
u32 *key_jump_cmd, *wait_cmd;
u32 nfifo;
@@ -1267,6 +1269,26 @@ void cnstr_shdsc_chachapoly(u32 * const desc, struct 
alginfo *cdata,
 OP_ALG_DECRYPT);
}
 
+   if (is_qi) {
+   u32 *wait_load_cmd;
+   u32 ctx1_iv_off = is_ipsec ? 8 : 4;
+
+   /* REG3 = assoclen */
+   append_seq_load(desc, 4, LDST_CLASS_DECO |
+   LDST_SRCDST_WORD_DECO_MATH3 |
+   4 << LDST_OFFSET_SHIFT);
+
+   wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+   JUMP_COND_CALM | JUMP_COND_NCP |
+   JUMP_COND_NOP | JUMP_COND_NIP |
+   JUMP_COND_NIFP);
+   set_jump_tgt_here(desc, wait_load_cmd);
+
+   append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+   LDST_SRCDST_BYTE_CONTEXT |
+   ctx1_iv_off << LDST_OFFSET_SHIFT);
+   }
+
/*
 * MAGIC with NFIFO
 * Read associated data from the input and send them to class1 and
diff --git a/drivers/crypto/caam/caamalg_desc.h 
b/drivers/crypto/caam/caamalg_desc.h
index a1a7b0e6889d..d5ca42ff961a 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -98,7 +98,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct 
alginfo *cdata,
 
 void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
-   unsigned int icvsize, const bool encap);
+   unsigned int icvsize, const bool encap,
+   const bool is_qi);
 
 void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
diff --git a/drivers/crypto/caam/caamalg_qi2.c 
b/drivers/crypto/caam/caamalg_qi2.c
index a9e264bb9629..2598640aa98b 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -462,7 +462,15 @@ static struct aead_edesc *aead_edesc_alloc(struct 
aead_request *req,
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
 
-   edesc->assoclen = cpu_to_caam32(req->assoclen);
+   if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+   OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
+   /*
+* The associated data comes already with the IV but we need
+* to skip it when we authenticate or encrypt...
+*/
+   

[PATCH 4/5] crypto: caam/jr - add support for Chacha20 + Poly1305

2018-11-08 Thread Horia Geantă
Add support for Chacha20 + Poly1305 combined AEAD:
-generic (rfc7539)
-IPsec (rfc7634 - known as rfc7539esp in the kernel)
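
For the IPsec variant the only user-visible difference is the keying layout: rfc7539esp(chacha20,poly1305) has an 8-byte per-request IV, and the remaining 4 nonce bytes travel as a salt appended to the key, which is what chachapoly_setkey() below splits apart (saltlen = CHACHAPOLY_IV_SIZE - ivsize). A hedged sketch of the expected setkey blob (function name illustrative):

#include <crypto/aead.h>
#include <crypto/chacha20.h>

/*
 * rfc7539esp(chacha20,poly1305): ivsize = 8, CHACHAPOLY_IV_SIZE = 12,
 * so setkey receives the 32-byte ChaCha20 key followed by a 4-byte salt.
 */
static int rfc7539esp_setkey_demo(struct crypto_aead *tfm)
{
        u8 key_and_salt[CHACHA20_KEY_SIZE + 4] = {
                /* [0..31]  ChaCha20 key           */
                /* [32..35] salt (high nonce bytes) */
        };

        return crypto_aead_setkey(tfm, key_and_salt, sizeof(key_and_salt));
}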

Signed-off-by: Cristian Stoica 
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg.c  | 221 -
 drivers/crypto/caam/caamalg_desc.c | 111 +++
 drivers/crypto/caam/caamalg_desc.h |   4 +
 drivers/crypto/caam/compat.h   |   1 +
 drivers/crypto/caam/desc.h |  15 +++
 drivers/crypto/caam/desc_constr.h  |   7 +-
 6 files changed, 354 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9f1414030bc2..cbaeb264a261 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -72,6 +72,8 @@
 #define AUTHENC_DESC_JOB_IO_LEN(AEAD_DESC_JOB_IO_LEN + \
 CAAM_CMD_SZ * 5)
 
+#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
+
 #define DESC_MAX_USED_BYTES(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
 #define DESC_MAX_USED_LEN  (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
@@ -513,6 +515,61 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
 }
 
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(aead);
+   struct device *jrdev = ctx->jrdev;
+   unsigned int ivsize = crypto_aead_ivsize(aead);
+   u32 *desc;
+
+   if (!ctx->cdata.keylen || !ctx->authsize)
+   return 0;
+
+   desc = ctx->sh_desc_enc;
+   cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+  ctx->authsize, true);
+   dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+  desc_bytes(desc), ctx->dir);
+
+   desc = ctx->sh_desc_dec;
+   cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+  ctx->authsize, false);
+   dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+  desc_bytes(desc), ctx->dir);
+
+   return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+   if (authsize != POLY1305_DIGEST_SIZE)
+   return -EINVAL;
+
+   ctx->authsize = authsize;
+   return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+unsigned int keylen)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(aead);
+   unsigned int ivsize = crypto_aead_ivsize(aead);
+   unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+   if (keylen != CHACHA20_KEY_SIZE + saltlen) {
+   crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+   return -EINVAL;
+   }
+
+   ctx->cdata.key_virt = key;
+   ctx->cdata.keylen = keylen - saltlen;
+
+   return chachapoly_set_sh_desc(aead);
+}
+
 static int aead_setkey(struct crypto_aead *aead,
   const u8 *key, unsigned int keylen)
 {
@@ -1031,6 +1088,40 @@ static void init_gcm_job(struct aead_request *req,
/* End of blank commands */
 }
 
+static void init_chachapoly_job(struct aead_request *req,
+   struct aead_edesc *edesc, bool all_contig,
+   bool encrypt)
+{
+   struct crypto_aead *aead = crypto_aead_reqtfm(req);
+   unsigned int ivsize = crypto_aead_ivsize(aead);
+   unsigned int assoclen = req->assoclen;
+   u32 *desc = edesc->hw_desc;
+   u32 ctx_iv_off = 4;
+
+   init_aead_job(req, edesc, all_contig, encrypt);
+
+   if (ivsize != CHACHAPOLY_IV_SIZE) {
+   /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
+   ctx_iv_off += 4;
+
+   /*
+* The associated data comes already with the IV but we need
+* to skip it when we authenticate or encrypt...
+*/
+   assoclen -= ivsize;
+   }
+
+   append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
+
+   /*
+* For IPsec load the IV further in the same register.
+* For RFC7539 simply load the 12 bytes nonce in a single operation
+*/
+   append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
+  LDST_SRCDST_BYTE_CONTEXT |
+  ctx_iv_off << LDST_OFFSET_SHIFT);
+}
+
 static void init_authenc_job(struct aead_request *req,
 struct aead_edesc *edesc,
 bool all_contig, bool encrypt)
@@ -1289,6 +1380,72 @@ static int gcm_encrypt(struct aead_request *req)
return ret;
 }
 
+static int chachapoly_encrypt(struct aead_request *req)
+{
+   struct aea

[PATCH 0/5] crypto: caam - add support for Era 10

2018-11-08 Thread Horia Geantă
This patch set adds support for CAAM Era 10, currently used in LX2160A SoC:
-new register mapping: some registers/fields are deprecated and moved
to different locations, mainly version registers
-algorithms
chacha20 (over DPSECI - Data Path SEC Interface on fsl-mc bus)
rfc7539(chacha20,poly1305) (over both DPSECI and Job Ring Interface)
rfc7539esp(chacha20,poly1305) (over both DPSECI and Job Ring Interface)

Note: the patch set is generated on top of cryptodev-2.6, however testing
was performed based on linux-next (tag: next-20181108) - which includes
LX2160A platform support + manually updating LX2160A dts with:
-fsl-mc bus DT node
-missing dma-ranges property in soc DT node

Cristian Stoica (1):
  crypto: export CHACHAPOLY_IV_SIZE

Horia Geantă (4):
  crypto: caam - add register map changes cf. Era 10
  crypto: caam/qi2 - add support for ChaCha20
  crypto: caam/jr - add support for Chacha20 + Poly1305
  crypto: caam/qi2 - add support for Chacha20 + Poly1305

 crypto/chacha20poly1305.c  |   2 -
 drivers/crypto/caam/caamalg.c  | 266 ++---
 drivers/crypto/caam/caamalg_desc.c | 139 ++-
 drivers/crypto/caam/caamalg_desc.h |   5 +
 drivers/crypto/caam/caamalg_qi.c   |  37 --
 drivers/crypto/caam/caamalg_qi2.c  | 156 +-
 drivers/crypto/caam/caamhash.c |  20 ++-
 drivers/crypto/caam/caampkc.c  |  10 +-
 drivers/crypto/caam/caamrng.c  |  10 +-
 drivers/crypto/caam/compat.h   |   2 +
 drivers/crypto/caam/ctrl.c |  28 +++-
 drivers/crypto/caam/desc.h |  28 
 drivers/crypto/caam/desc_constr.h  |   7 +-
 drivers/crypto/caam/regs.h |  74 +--
 include/crypto/chacha20.h  |   1 +
 15 files changed, 724 insertions(+), 61 deletions(-)

-- 
2.16.2



[PATCH 1/5] crypto: caam - add register map changes cf. Era 10

2018-11-08 Thread Horia Geantă
Era 10 changes the register map.

The updates that affect the drivers:
-new version registers are added
-DBG_DBG[deco_state] field is moved to a new register -
DBG_EXEC[19:16] @ 8_0E3Ch.
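
Condensed, the detection change boils down to the following (a sketch of the pattern used throughout this patch, using the AES accelerator as the example and only macros appearing in the diffs): pre-Era 10 parts expose CHA version and instantiation info in the shared perfmon registers, while Era 10 parts have one version register per accelerator under vreg that carries both the version ID and the instance count:

        u32 cha_vid, cha_inst, aesa, aes_inst;
        bool aes_lp;

        /* Pre-Era 10: shared CHA id/instantiation registers in perfmon */
        cha_vid  = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
        cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
        aes_lp   = (cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP;
        aes_inst = cha_inst & CHA_ID_LS_AES_MASK;   /* nonzero if present */

        /* Era 10: one version register per accelerator under vreg */
        aesa     = rd_reg32(&priv->ctrl->vreg.aesa);
        aes_lp   = ((aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT) ==
                   CHA_VER_VID_AES_LP;
        aes_inst = aesa & CHA_VER_NUM_MASK;         /* instance count */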

Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg.c| 47 +
 drivers/crypto/caam/caamalg_qi.c | 37 +++-
 drivers/crypto/caam/caamhash.c   | 20 ---
 drivers/crypto/caam/caampkc.c| 10 --
 drivers/crypto/caam/caamrng.c| 10 +-
 drivers/crypto/caam/ctrl.c   | 28 +++
 drivers/crypto/caam/desc.h   |  7 
 drivers/crypto/caam/regs.h   | 74 ++--
 8 files changed, 184 insertions(+), 49 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 869f092432de..9f1414030bc2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3135,7 +3135,7 @@ static int __init caam_algapi_init(void)
struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
-   u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+   u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
 
@@ -3168,14 +3168,34 @@ static int __init caam_algapi_init(void)
 * Register crypto algorithms the device supports.
 * First, detect presence and attributes of DES, AES, and MD blocks.
 */
-   cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-   cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-   des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
-   aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
-   md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+   if (priv->era < 10) {
+   u32 cha_vid, cha_inst;
+
+   cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+   aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+   md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+   cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+   des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+  CHA_ID_LS_DES_SHIFT;
+   aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+   md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+   } else {
+   u32 aesa, mdha;
+
+   aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+   mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+   aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+   md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+   des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+   aes_inst = aesa & CHA_VER_NUM_MASK;
+   md_inst = mdha & CHA_VER_NUM_MASK;
+   }
 
/* If MD is present, limit digest size based on LP256 */
-   if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+   if (md_inst && md_vid  == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
 
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -3196,10 +3216,10 @@ static int __init caam_algapi_init(void)
 * Check support for AES modes not available
 * on LP devices.
 */
-   if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-   if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
-OP_ALG_AAI_XTS)
-   continue;
+   if (aes_vid == CHA_VER_VID_AES_LP &&
+   (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
+   OP_ALG_AAI_XTS)
+   continue;
 
caam_skcipher_alg_init(t_alg);
 
@@ -3236,9 +3256,8 @@ static int __init caam_algapi_init(void)
 * Check support for AES algorithms not available
 * on LP devices.
 */
-   if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-   if (alg_aai == OP_ALG_AAI_GCM)
-   continue;
+   if (aes_vid  == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+   continue;
 
/*
 * Skip algorithms requiring message digests
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 23c9fc4975f8..c0d55310aade 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -2462,7 +2462,7 @@ static int __init caam_qi_algapi_init(void)
struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
-  

[PATCH 3/5] crypto: export CHACHAPOLY_IV_SIZE

2018-11-08 Thread Horia Geantă
From: Cristian Stoica 

Move CHACHAPOLY_IV_SIZE to header file, so it can be reused.

Signed-off-by: Cristian Stoica 
Signed-off-by: Horia Geantă 
---
 crypto/chacha20poly1305.c | 2 --
 include/crypto/chacha20.h | 1 +
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 600afa99941f..f9dd5453046a 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -22,8 +22,6 @@
 
 #include "internal.h"
 
-#define CHACHAPOLY_IV_SIZE 12
-
 struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
struct crypto_ahash_spawn poly;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index f76302d99e2b..2d3129442a52 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,6 +13,7 @@
 #define CHACHA20_IV_SIZE   16
 #define CHACHA20_KEY_SIZE  32
 #define CHACHA20_BLOCK_SIZE64
+#define CHACHAPOLY_IV_SIZE 12
 
 struct chacha20_ctx {
u32 key[8];
-- 
2.16.2



[PATCH 2/5] crypto: caam/qi2 - add support for ChaCha20

2018-11-08 Thread Horia Geantă
Add support for ChaCha20 skcipher algorithm.
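
A minimal sketch of how the new transform is requested from the crypto API; the algorithm name and the CHACHA20_KEY_SIZE/CHACHA20_IV_SIZE constants are from this series, the rest (buffer handling, flags) is illustrative only:

#include <crypto/chacha20.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int chacha20_demo(u8 *buf, unsigned int len)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        struct scatterlist sg;
        u8 key[CHACHA20_KEY_SIZE] = { 0 };
        u8 iv[CHACHA20_IV_SIZE] = { 0 };        /* 16 bytes */
        int ret;

        tfm = crypto_alloc_skcipher("chacha20", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_skcipher(tfm);
                return -ENOMEM;
        }

        ret = crypto_skcipher_setkey(tfm, key, sizeof(key));

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);
        if (!ret)
                ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
        crypto_free_skcipher(tfm);
        return ret;
}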

Signed-off-by: Carmen Iorga 
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg_desc.c |  6 --
 drivers/crypto/caam/caamalg_qi2.c  | 27 +--
 drivers/crypto/caam/compat.h   |  1 +
 drivers/crypto/caam/desc.h |  6 ++
 4 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index 1a6f0da14106..d850590079a2 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1228,7 +1228,8 @@ static inline void skcipher_append_src_dst(u32 *desc)
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ *- OP_ALG_ALGSEL_CHACHA20
  * @ivsize: initialization vector size
  * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  * @ctx1_iv_off: IV offset in CONTEXT1 register
@@ -1293,7 +1294,8 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ *- OP_ALG_ALGSEL_CHACHA20
  * @ivsize: initialization vector size
  * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  * @ctx1_iv_off: IV offset in CONTEXT1 register
diff --git a/drivers/crypto/caam/caamalg_qi2.c 
b/drivers/crypto/caam/caamalg_qi2.c
index 7d8ac0222fa3..a9e264bb9629 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -816,7 +816,9 @@ static int skcipher_setkey(struct crypto_skcipher 
*skcipher, const u8 *key,
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-  OP_ALG_AAI_CTR_MOD128);
+  OP_ALG_AAI_CTR_MOD128) &&
+  ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
+  OP_ALG_ALGSEL_CHACHA20);
const bool is_rfc3686 = alg->caam.rfc3686;
 
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
@@ -1494,7 +1496,23 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
-   }
+   },
+   {
+   .skcipher = {
+   .base = {
+   .cra_name = "chacha20",
+   .cra_driver_name = "chacha20-caam-qi2",
+   .cra_blocksize = 1,
+   },
+   .setkey = skcipher_setkey,
+   .encrypt = skcipher_encrypt,
+   .decrypt = skcipher_decrypt,
+   .min_keysize = CHACHA20_KEY_SIZE,
+   .max_keysize = CHACHA20_KEY_SIZE,
+   .ivsize = CHACHA20_IV_SIZE,
+   },
+   .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
+   },
 };
 
 static struct caam_aead_alg driver_aeads[] = {
@@ -4908,6 +4926,11 @@ static int dpaa2_caam_probe(struct fsl_mc_device 
*dpseci_dev)
alg_sel == OP_ALG_ALGSEL_AES)
continue;
 
+   /* Skip CHACHA20 algorithms if not supported by device */
+   if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+   !priv->sec_attr.ccha_acc_num)
+   continue;
+
t_alg->caam.dev = dev;
caam_skcipher_alg_init(t_alg);
 
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 9604ff7a335e..a5081b4050b6 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -36,6 +36,7 @@
 #include 
 #include 
 #include 
+#include <crypto/chacha20.h>
 #include 
 #include 
 #include 
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec1ef06049b4..9d117e51629f 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1159,6 +1159,7 @@
 #define OP_ALG_ALGSEL_KASUMI   (0x70 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_CRC  (0x90 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SNOW_F9  (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
 
 #define OP_ALG_AAI_SHIFT   4
 #define OP_ALG_AAI_MASK(0x1ff << OP_ALG_AAI_SHIFT)
@@ -1206,6 

[PATCH] crypto: caam - add SPDX license identifier to all files

2018-10-10 Thread Horia Geantă
Previously, a tree-wide change added SPDX license identifiers to
files lacking licensing information:
b24413180f56 ("License cleanup: add SPDX GPL-2.0 license identifier to files 
with no license")

To be consistent update the rest of the files:
-files with license specified by means of MODULE_LICENSE()
-files with complete license text
-Kconfig

Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/Kconfig|  1 +
 drivers/crypto/caam/caamalg.c  |  1 +
 drivers/crypto/caam/caamalg_desc.c |  1 +
 drivers/crypto/caam/caamalg_qi.c   |  1 +
 drivers/crypto/caam/caamhash.c |  1 +
 drivers/crypto/caam/caampkc.c  |  1 +
 drivers/crypto/caam/caamrng.c  |  1 +
 drivers/crypto/caam/ctrl.c |  1 +
 drivers/crypto/caam/jr.c   |  1 +
 drivers/crypto/caam/sg_sw_qm.h | 29 +
 drivers/crypto/caam/sg_sw_qm2.h| 30 +-
 11 files changed, 11 insertions(+), 57 deletions(-)

diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 619a2c929f3e..c4b1cade55c1 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 config CRYPTO_DEV_FSL_CAAM_COMMON
tristate
 
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2f9f803bb1c4..869f092432de 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for crypto API
  *
diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index 3f217093a219..1a6f0da14106 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Shared descriptors for aead, skcipher algorithms
  *
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index b4644366e53a..23c9fc4975f8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Freescale FSL CAAM support for crypto API over QI backend.
  * Based on caamalg.c
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 4c30bb5516d6..46924affa0bd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for ahash functions of crypto API
  *
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e5533a..4fc209cbbeab 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * caam - Freescale FSL CAAM support for Public Key Cryptography
  *
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4ff019..4318b0aa6fb9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for hw_random
  *
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f428c1..3fc793193821 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* * CAAM control-plane driver backend
  * Controller-level driver, kernel property detection, initialization
  *
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd72016ffe..d50085a03597 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * CAAM/SEC 4.x transport/backend driver
  * JobR backend functionality
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4df745f..b3e1aaaeffea 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /*
  * Copyright 2013-2016 Freescale Semiconductor, Inc.
  * Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- *   names of its contributors may be used to endorse or promote products
- *   derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distrib

[PATCH] crypto: tcrypt - remove remnants of pcomp-based zlib

2018-09-19 Thread Horia Geantă
Commit 110492183c4b ("crypto: compress - remove unused pcomp interface")
removed the pcomp interface but missed cleaning up tcrypt.

Signed-off-by: Horia Geantă 
---
 crypto/tcrypt.c  | 7 +--
 crypto/testmgr.h | 2 --
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bdde95e8d369..b4ed248095be 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,8 +76,7 @@ static char *check[] = {
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
-   "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
-   NULL
+   "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", NULL
 };
 
 static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
@@ -1878,10 +1877,6 @@ static int do_test(const char *alg, u32 type, u32 mask, 
int m, u32 num_mb)
ret += tcrypt_test("ecb(seed)");
break;
 
-   case 44:
-   ret += tcrypt_test("zlib");
-   break;
-
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 0b3d7cadbe93..13b82af11bcd 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -24,8 +24,6 @@
 #ifndef _CRYPTO_TESTMGR_H
 #define _CRYPTO_TESTMGR_H
 
-#include 
-
 #define MAX_DIGEST_SIZE64
 #define MAX_TAP8
 
-- 
2.16.2



[PATCH] crypto: caam/jr - fix ablkcipher_edesc pointer arithmetic

2018-09-14 Thread Horia Geantă
In some cases the zero-length hw_desc array at the end of the
ablkcipher_edesc struct requires 4 bytes of tail padding.

Due to tail padding and the way pointers to S/G table and IV
are computed:
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 desc_bytes;
iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
the first 4 bytes of the IV are overwritten by the S/G table.

Update the computation of the S/G table pointer to rely on the offset of
the hw_desc member rather than on the sizeof() operator.
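
The effect is easy to reproduce outside the driver. A self-contained illustration (generic struct, not the actual ablkcipher_edesc layout): when the flexible array starts at an offset that is not a multiple of the struct's alignment, sizeof() includes the tail padding, so "end of struct" and "start of hw_desc[]" are no longer the same address:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct edesc {
        uint64_t dma_addr;      /* forces 8-byte struct alignment */
        uint32_t nents;
        uint32_t hw_desc[];     /* descriptor words follow the header */
};

int main(void)
{
        /*
         * offsetof() = 12 but sizeof() = 16 on common LP64 ABIs:
         * computing the trailing-data pointer with sizeof() lands
         * 4 bytes past where hw_desc[] (and the data appended after
         * it) actually starts, which is how the IV got overwritten.
         */
        printf("offsetof(hw_desc) = %zu\n", offsetof(struct edesc, hw_desc));
        printf("sizeof(struct)    = %zu\n", sizeof(struct edesc));
        return 0;
}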

Cc:  # 4.13+
Fixes: 115957bb3e59 ("crypto: caam - fix IV DMA mapping and updating")
Signed-off-by: Horia Geantă 
---

This is for crypto-2.6 tree / current v4.19 release cycle.

Note that it will create merge conflicts later in v4.20 due to commits
cf5448b5c3d8 ("crypto: caam/jr - remove ablkcipher IV generation")
5ca7badb1f62 ("crypto: caam/jr - ablkcipher -> skcipher conversion")
from cryptodev-2.6 tree.

Should I send a similar fix for skcipher-based caam/jr driver
on cryptodev-2.6 tree, or will this be handled while solving the conflicts?

 drivers/crypto/caam/caamalg.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d67667970f7e..ec40f991e6c6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
-   edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-desc_bytes;
+   edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+ desc_bytes);
edesc->iv_dir = DMA_TO_DEVICE;
 
/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc 
*ablkcipher_giv_edesc_alloc(
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
-   edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-desc_bytes;
+   edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+ desc_bytes);
edesc->iv_dir = DMA_FROM_DEVICE;
 
/* Make sure IV is located in a DMAable area */
-- 
2.16.2



[PATCH] crypto: tcrypt - fix ghash-generic speed test

2018-09-12 Thread Horia Geantă
ghash is a keyed hash algorithm, thus setkey needs to be called.
Otherwise the following error occurs:
$ modprobe tcrypt mode=318 sec=1
testing speed of async ghash-generic (ghash-generic)
tcrypt: test  0 (   16 byte blocks,   16 bytes per update,   1 updates):
tcrypt: hashing failed ret=-126
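
A hedged sketch of the requirement being fixed (not the tcrypt code itself): any user of a keyed hash such as ghash must install a key before hashing, otherwise the crypto core rejects the operation (ret=-126 above being -ENOKEY). Function name and key value are illustrative:

#include <crypto/hash.h>

/* ghash takes a 16-byte key (the GHASH subkey used by GCM). */
static int ghash_demo(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        u8 key[16] = { 0 };
        int ret;

        tfm = crypto_alloc_shash("ghash", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Skipping this is what made the speed test fail with -ENOKEY. */
        ret = crypto_shash_setkey(tfm, key, sizeof(key));
        if (!ret) {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;        /* field still exists on kernels of this era */
                ret = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return ret;
}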

Cc:  # 4.6+
Fixes: 0660511c0bee ("crypto: tcrypt - Use ahash")
Tested-by: Franck Lenormand 
Signed-off-by: Horia Geantă 
---
 crypto/tcrypt.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bdde95e8d369..6e0a054bb61d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1103,6 +1103,9 @@ static void test_ahash_speed_common(const char *algo, 
unsigned int secs,
break;
}
 
+   if (speed[i].klen)
+   crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
+
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / 
speed[i].plen);
-- 
2.16.2



[PATCH 1/4] crypto: caam/jr - remove ablkcipher IV generation

2018-08-06 Thread Horia Geantă
IV generation is done only at AEAD level.
Support in ablkcipher is not needed, thus remove the dead code.

Link: 
https://www.mail-archive.com/search?l=mid=20160901101257.ga3...@gondor.apana.org.au
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg.c | 275 ++
 1 file changed, 11 insertions(+), 264 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d67667970f7e..b23730c07fda 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -102,11 +102,9 @@ struct caam_aead_alg {
 struct caam_ctx {
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
-   u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
-   dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
@@ -703,13 +701,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
-   /* ablkcipher_givencrypt shared descriptor */
-   desc = ctx->sh_desc_givenc;
-   cnstr_shdsc_ablkcipher_givencap(desc, >cdata, ivsize, is_rfc3686,
-   ctx1_iv_off);
-   dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
-  desc_bytes(desc), ctx->dir);
-
return 0;
 }
 
@@ -769,7 +760,6 @@ struct aead_edesc {
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
@@ -780,7 +770,6 @@ struct ablkcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
-   enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -790,8 +779,7 @@ struct ablkcipher_edesc {
 static void caam_unmap(struct device *dev, struct scatterlist *src,
   struct scatterlist *dst, int src_nents,
   int dst_nents,
-  dma_addr_t iv_dma, int ivsize,
-  enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+  dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
   int sec4_sg_bytes)
 {
if (dst != src) {
@@ -803,7 +791,7 @@ static void caam_unmap(struct device *dev, struct 
scatterlist *src,
}
 
if (iv_dma)
-   dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+   dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 DMA_TO_DEVICE);
@@ -814,7 +802,7 @@ static void aead_unmap(struct device *dev,
   struct aead_request *req)
 {
caam_unmap(dev, req->src, req->dst,
-  edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+  edesc->src_nents, edesc->dst_nents, 0, 0,
   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -827,7 +815,7 @@ static void ablkcipher_unmap(struct device *dev,
 
caam_unmap(dev, req->src, req->dst,
   edesc->src_nents, edesc->dst_nents,
-  edesc->iv_dma, ivsize, edesc->iv_dir,
+  edesc->iv_dma, ivsize,
   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -916,18 +904,6 @@ static void ablkcipher_encrypt_done(struct device *jrdev, 
u32 *desc, u32 err,
scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
 ivsize, 0);
 
-   /* In case initial IV was generated, copy it in GIVCIPHER request */
-   if (edesc->iv_dir == DMA_FROM_DEVICE) {
-   u8 *iv;
-   struct skcipher_givcrypt_request *greq;
-
-   greq = container_of(req, struct skcipher_givcrypt_request,
-   creq);
-   iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
-edesc->sec4_sg_bytes;
-   memcpy(greq->giv, iv, ivsize);
-   }
-
kfree(edesc);
 
ablkcipher_request_complete(req, err);
@@ -1148,47 +1124,6 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t 
ptr,
append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
 }
 
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_g

[PATCH 4/4] crypto: caam/qi - ablkcipher -> skcipher conversion

2018-08-06 Thread Horia Geantă
Convert driver from deprecated ablkcipher API to skcipher.
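
The conversion is mostly mechanical; as a rough orientation (a sketch of the correspondence, not an exhaustive list), the deprecated and replacement APIs line up as follows, with the setkey prototypes taken from the conversions in this series:

/*
 *   struct crypto_ablkcipher        ->  struct crypto_skcipher
 *   struct ablkcipher_request       ->  struct skcipher_request
 *   crypto_ablkcipher_ctx()         ->  crypto_skcipher_ctx()
 *   crypto_ablkcipher_ivsize()      ->  crypto_skcipher_ivsize()
 *   req->nbytes / req->info         ->  req->cryptlen / req->iv
 *   struct ablkcipher_alg in crypto_alg
 *                                   ->  standalone struct skcipher_alg,
 *                                       registered with
 *                                       crypto_register_skcipher()
 */

/* e.g. the setkey callback changes from */
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
                             const u8 *key, unsigned int keylen);
/* to */
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
                           unsigned int keylen);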

Link: 
https://www.mail-archive.com/search?l=mid=20170728085622.gc19...@gondor.apana.org.au
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg.c  |  12 +-
 drivers/crypto/caam/caamalg_desc.c |  61 +++---
 drivers/crypto/caam/caamalg_desc.h |  24 +--
 drivers/crypto/caam/caamalg_qi.c   | 373 +
 4 files changed, 217 insertions(+), 253 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c6e3c8ad6d2d..8152c6319c57 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -693,15 +693,15 @@ static int skcipher_setkey(struct crypto_skcipher 
*skcipher, const u8 *key,
 
/* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
-ctx1_iv_off);
+   cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+  ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
/* skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
-ctx1_iv_off);
+   cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+  ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
@@ -727,13 +727,13 @@ static int xts_skcipher_setkey(struct crypto_skcipher 
*skcipher, const u8 *key,
 
/* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+   cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
/* xts_skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+   cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index f70b0877274c..3f217093a219 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,7 @@
 /*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
  *
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
  */
 
 #include "compat.h"
@@ -1212,11 +1212,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct 
alginfo *cdata,
 }
 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
 
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst 
*/
+static inline void skcipher_append_src_dst(u32 *desc)
 {
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1223,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
 }
 
 /**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1232,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
  * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  * @ctx1_iv_off: IV offset in CONTEXT1 register
  */
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+   unsigned int ivsize, const bool is_rfc3686,
+   const u32 ctx1_iv_off)
 {
u32 *key_jump_cmd;
 
@@ -1280,18 +1277,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, 
struct alginfo *cdata,
 OP_ALG_ENCRYPT);
 
/* Perform operation */
-   ablkcipher_append_src_dst(desc);
+   skcipher_append_src_dst(desc);
 
 #ifdef DEBUG
print_hex_dump(KERN_ERR,
-  "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+

[PATCH 3/4] crypto: caam/jr - ablkcipher -> skcipher conversion

2018-08-06 Thread Horia Geantă
Convert driver from deprecated ablkcipher API to skcipher.

Link: 
https://www.mail-archive.com/search?l=mid=20170728085622.gc19...@gondor.apana.org.au
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg.c | 448 +++---
 drivers/crypto/caam/compat.h  |   1 +
 2 files changed, 208 insertions(+), 241 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b23730c07fda..c6e3c8ad6d2d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,7 +2,7 @@
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -81,8 +81,6 @@
 #define debug(format, arg...)
 #endif
 
-static struct list_head alg_list;
-
 struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -96,6 +94,12 @@ struct caam_aead_alg {
bool registered;
 };
 
+struct caam_skcipher_alg {
+   struct skcipher_alg skcipher;
+   struct caam_alg_entry caam;
+   bool registered;
+};
+
 /*
  * per-session context
  */
@@ -646,20 +650,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return rfc4543_set_sh_desc(aead);
 }
 
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+  unsigned int keylen)
 {
-   struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-   struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-   const char *alg_name = crypto_tfm_alg_name(tfm);
+   struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+   struct caam_skcipher_alg *alg =
+   container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+skcipher);
struct device *jrdev = ctx->jrdev;
-   unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+   unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
   OP_ALG_AAI_CTR_MOD128);
-   const bool is_rfc3686 = (ctr_mode &&
-(strstr(alg_name, "rfc3686") != NULL));
+   const bool is_rfc3686 = alg->caam.rfc3686;
 
 #ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -687,14 +691,14 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
 
-   /* ablkcipher_encrypt shared descriptor */
+   /* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
 ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
-   /* ablkcipher_decrypt shared descriptor */
+   /* skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
 ctx1_iv_off);
@@ -704,16 +708,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
return 0;
 }
 
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+  unsigned int keylen)
 {
-   struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+   struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
 
if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
-   crypto_ablkcipher_set_flags(ablkcipher,
-   CRYPTO_TFM_RES_BAD_KEY_LEN);
+   crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -722,13 +725,13 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
 
-   /* xts_ablkcipher_encrypt shared descriptor */
+   /* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
-   /* xts_ablkcipher

[PATCH 2/4] crypto: caam/qi - remove ablkcipher IV generation

2018-08-06 Thread Horia Geantă
IV generation is done only at AEAD level.
Support in ablkcipher is not needed, thus remove the dead code.

Link: 
https://www.mail-archive.com/search?l=mid=20160901101257.ga3...@gondor.apana.org.a
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg_desc.c |  81 
 drivers/crypto/caam/caamalg_desc.h |   4 -
 drivers/crypto/caam/caamalg_qi.c   | 261 +++--
 drivers/crypto/caam/qi.h   |   1 -
 4 files changed, 20 insertions(+), 327 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index a408edd84f34..f70b0877274c 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1358,87 +1358,6 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, 
struct alginfo *cdata,
 }
 EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
 
-/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- *   with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
-unsigned int ivsize, const bool is_rfc3686,
-const u32 ctx1_iv_off)
-{
-   u32 *key_jump_cmd, geniv;
-
-   init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-   /* Skip if already shared */
-   key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-  JUMP_COND_SHRD);
-
-   /* Load class1 key only */
-   append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
-   /* Load Nonce into CONTEXT1 reg */
-   if (is_rfc3686) {
-   const u8 *nonce = cdata->key_virt + cdata->keylen;
-
-   append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-  LDST_CLASS_IND_CCB |
-  LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-   append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
-   MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
-   (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-   }
-   set_jump_tgt_here(desc, key_jump_cmd);
-
-   /* Generate IV */
-   geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
-   NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
-   (ivsize << NFIFOENTRY_DLEN_SHIFT);
-   append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
-   LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-   append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-   append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
-   MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
-   (ctx1_iv_off << MOVE_OFFSET_SHIFT));
-   append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-   /* Copy generated IV to memory */
-   append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
-LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
-   /* Load Counter into CONTEXT1 reg */
-   if (is_rfc3686)
-   append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-LDST_SRCDST_BYTE_CONTEXT |
-((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
-   if (ctx1_iv_off)
-   append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
-   (1 << JUMP_OFFSET_SHIFT));
-
-   /* Load operation */
-   append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
-OP_ALG_ENCRYPT);
-
-   /* Perform operation */
-   ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
-   print_hex_dump(KERN_ERR,
-  "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
-  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
-
 /**
  * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
  *descriptor
diff --git a/drivers/crypto/caam/caamalg_desc.h 
b/drivers/crypto/caam/caamalg_desc.h
index a917af5776ce..f726f54a05b8 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -104,1

[PATCH 0/4] crypto: caam - ablkcipher -> skcipher conversion

2018-08-06 Thread Horia Geantă
This patch set converts caam/jr and caam/qi top level drivers
from ablkcipher API to skcipher.

First two patches remove the unused ablkcipher algorithms with
support for IV generation.
The following two patches deal with the conversion.

Note: There is a dependency for the patch set - a fix sent separately:
"crypto: caam/qi - fix error path in xts setkey"
https://patchwork.kernel.org/patch/10557015

Horia Geantă (4):
  crypto: caam/jr - remove ablkcipher IV generation
  crypto: caam/qi - remove ablkcipher IV generation
  crypto: caam/jr - ablkcipher -> skcipher conversion
  crypto: caam/qi - ablkcipher -> skcipher conversion

 drivers/crypto/caam/caamalg.c  | 729 +++--
 drivers/crypto/caam/caamalg_desc.c | 142 ++--
 drivers/crypto/caam/caamalg_desc.h |  28 +-
 drivers/crypto/caam/caamalg_qi.c   | 626 ++-
 drivers/crypto/caam/compat.h   |   1 +
 drivers/crypto/caam/qi.h   |   1 -
 6 files changed, 449 insertions(+), 1078 deletions(-)

-- 
2.16.2



[PATCH] crypto: caam - fix DMA mapping direction for RSA forms 2 & 3

2018-08-06 Thread Horia Geantă
Crypto engine needs some temporary locations in external memory for
running RSA decrypt forms 2 and 3 (CRT).
These are named "tmp1" and "tmp2" in the PDB.

Update DMA mapping direction of tmp1 and tmp2 from TO_DEVICE to
BIDIRECTIONAL, since engine needs r/w access.
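
In streaming DMA API terms, the difference being fixed is the following (a generic sketch, not the driver code; error checks omitted): DMA_TO_DEVICE promises that the device only reads the buffer, so the API is free not to sync device writes back to the CPU; a scratch buffer the engine also writes must be mapped DMA_BIDIRECTIONAL, and unmapped with the same direction:

#include <linux/dma-mapping.h>

/* Schematic: p is read-only input, tmp1 is engine scratch space. */
static void rsa_tmp_map_demo(struct device *dev, void *p, void *tmp1,
                             size_t len)
{
        dma_addr_t p_dma, tmp1_dma;

        /* only read by the engine: TO_DEVICE is enough */
        p_dma = dma_map_single(dev, p, len, DMA_TO_DEVICE);

        /* scratch the engine writes into: must be BIDIRECTIONAL */
        tmp1_dma = dma_map_single(dev, tmp1, len, DMA_BIDIRECTIONAL);

        /* ... run the job ... */

        /* unmap with the same direction used for mapping */
        dma_unmap_single(dev, tmp1_dma, len, DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, p_dma, len, DMA_TO_DEVICE);
}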

Cc:  # 4.13+
Fixes: 52e26d77b8b3 ("crypto: caam - add support for RSA key form 2")
Fixes: 4a651b122adb ("crypto: caam - add support for RSA key form 3")
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caampkc.c | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct 
rsa_edesc *edesc,
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-   dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+   dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct 
rsa_edesc *edesc,
dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-   dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+   dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request 
*req,
goto unmap_p;
}
 
-   pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+   pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_q;
}
 
-   pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+   pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
return 0;
 
 unmap_tmp1:
-   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request 
*req,
goto unmap_dq;
}
 
-   pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+   pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_qinv;
}
 
-   pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+   pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
return 0;
 
 unmap_tmp1:
-   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+   dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
-- 
2.16.2



[PATCH] crypto: caam/qi - fix error path in xts setkey

2018-08-06 Thread Horia Geantă
xts setkey callback returns 0 on some error paths.
Fix this by returning -EINVAL.
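
For reference, the convention the fix restores (a fragment mirroring the corrected code below): a setkey error path reports the reason through the tfm flags and signals failure through the return value, so callers never treat a bad key as accepted:

        if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
                dev_err(jrdev, "key size mismatch\n");
                goto badkey;
        }
        ...
badkey:
        crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;         /* report failure, never 0 */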

Cc:  # 4.12+
Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/caamalg_qi.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
int ret = 0;
 
if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
-   crypto_ablkcipher_set_flags(ablkcipher,
-   CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
-   return -EINVAL;
+   goto badkey;
}
 
ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
return ret;
 badkey:
crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-   return 0;
+   return -EINVAL;
 }
 
 /*
-- 
2.16.2



[PATCH] crypto: caam/jr - fix descriptor DMA unmapping

2018-08-06 Thread Horia Geantă
Descriptor address needs to be swapped to CPU endianness before being
DMA unmapped.
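
The asymmetry comes from the enqueue side (a sketch hedged accordingly, since the enqueue path is not shown in this hunk): the descriptor address is converted to CAAM byte order when it is written into the input ring, so the value read back from the output ring has to be converted back before it is usable as a CPU-side dma_addr_t:

        /* enqueue: CPU -> CAAM byte order when publishing the descriptor
         * (ring index name illustrative) */
        jrp->inpring[head] = cpu_to_caam_dma(desc_dma);

        /* dequeue: CAAM -> CPU byte order before unmapping (this fix) */
        dma_unmap_single(dev, caam_dma_to_cpu(jrp->outring[hw_idx].desc),
                         jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE);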

Cc:  # 4.8+
Fixes: 261ea058f016 ("crypto: caam - handle core endianness != caam endianness")
Reported-by: Laurentiu Tudor 
Signed-off-by: Horia Geantă 
---
 drivers/crypto/caam/jr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
/* Unmap just-run descriptor so we can post-process */
-   dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+   dma_unmap_single(dev,
+caam_dma_to_cpu(jrp->outring[hw_idx].desc),
 jrp->entinfo[sw_idx].desc_size,
 DMA_TO_DEVICE);
 
-- 
2.16.2



[PATCH] crypto: tcrypt - reschedule during speed tests

2018-07-23 Thread Horia Geantă
Avoid RCU stalls in the case of non-preemptible kernel and lengthy
speed tests by rescheduling when advancing from one block size
to another.

Signed-off-by: Horia Geantă 
---
 crypto/tcrypt.c | 36 
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 078ec36007bf..bdde95e8d369 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, 
int secs,
 
}
 
-   if (secs)
+   if (secs) {
ret = test_mb_aead_jiffies(data, enc, *b_size,
   secs, num_mb);
-   else
+   cond_resched();
+   } else {
ret = test_mb_aead_cycles(data, enc, *b_size,
  num_mb);
+   }
 
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, 
unsigned int secs,
   *b_size + (enc ? 0 : authsize),
   iv);
 
-   if (secs)
+   if (secs) {
ret = test_aead_jiffies(req, enc, *b_size,
secs);
-   else
+   cond_resched();
+   } else {
ret = test_aead_cycles(req, enc, *b_size);
+   }
 
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, 
unsigned int secs,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
 
-   if (secs)
+   if (secs) {
ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
num_mb);
-   else
+   cond_resched();
+   } else {
ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+   }
 
 
if (ret) {
@@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, 
unsigned int secs,
 
ahash_request_set_crypt(req, sg, output, speed[i].plen);
 
-   if (secs)
+   if (secs) {
ret = test_ahash_jiffies(req, speed[i].blen,
 speed[i].plen, output, secs);
-   else
+   cond_resched();
+   } else {
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
+   }
 
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
@@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, 
int enc, int secs,
   iv);
}
 
-   if (secs)
+   if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
  *b_size, secs,
  num_mb);
-   else
+   cond_resched();
+   } else {
ret = test_mb_acipher_cycles(data, enc,
 *b_size, num_mb);
+   }
 
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int 
enc, unsigned int secs,
 
skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
-   if (secs)
+   if (secs) {
ret = test_acipher_jiffies(req, enc,
   *b_size, secs);
-   else
+   cond_resched();
+   } else {
ret = test_acipher_cycles(req, enc,
  *b_size);
+   }
 
if (ret) {
pr_err("%s() failed flags=%x\n", e,
-- 
2.16.2



[PATCH 3/3] crypto: caam/qi - fix warning in init_cgr()

2018-05-23 Thread Horia Geantă
Coverity warns about an
"Unintentional integer overflow (OVERFLOW_BEFORE_WIDEN)"
when computing the congestion threshold value.

Even though it is highly unlikely for an overflow to happen,
use this as an opportunity to simplify the code.
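
The warning class itself is easy to demonstrate in isolation (generic userspace sketch, numbers unrelated to the actual CAAM limits): when both operands are 32-bit, the multiplication is performed in 32 bits and only the already-truncated result is widened to u64:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cpus = 70000, per_cpu = 70000;

        /* product computed in 32 bits, then widened: high bits lost */
        uint64_t bad  = cpus * per_cpu;
        /* cast one operand first, so the multiply is done in 64 bits */
        uint64_t good = (uint64_t)cpus * per_cpu;

        printf("bad  = %llu\n", (unsigned long long)bad);
        printf("good = %llu\n", (unsigned long long)good);
        return 0;
}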

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/qi.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b9480828da38..67f7f8c42c93 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -657,9 +657,8 @@ static int init_cgr(struct device *qidev)
 {
int ret;
struct qm_mcc_initcgr opts;
-   const u64 cpus = *(u64 *)qman_affine_cpus();
-   const int num_cpus = hweight64(cpus);
-   const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
+   const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
+   MAX_RSP_FQ_BACKLOG_PER_CPU;
 
ret = qman_alloc_cgrid();
if (ret) {
-- 
2.16.2



[PATCH 1/3] crypto: caam - fix MC firmware detection

2018-05-23 Thread Horia Geantă
Management Complex (MC) f/w detection is based on CTPR_MS[DPAA2] bit.

This is incorrect since:
-the bit is set for all CAAM blocks integrated in SoCs with a certain
Layerscape Chassis
-some SoCs with LS Chassis don't have an MC block (thus no MC f/w)

To fix this, MC f/w detection will be based on the presence of
"fsl,qoriq-mc" compatible string in the device tree.

Fixes: 297b9cebd2fc0 ("crypto: caam/jr - add support for DPAA2 parts")
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/ctrl.c   | 21 -
 drivers/crypto/caam/intern.h |  1 +
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index a28868d5b2d0..538c01f428c1 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -322,9 +322,9 @@ static int caam_remove(struct platform_device *pdev)
 
/*
 * De-initialize RNG state handles initialized by this driver.
-* In case of DPAA 2.x, RNG is managed by MC firmware.
+* In case of SoCs with Management Complex, RNG is managed by MC f/w.
 */
-   if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
+   if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
 
/* Shut down debug views */
@@ -618,11 +618,15 @@ static int caam_probe(struct platform_device *pdev)
/*
 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 * long pointers in master configuration register.
-* In case of DPAA 2.x, Management Complex firmware performs
+* In case of SoCs with Management Complex, MC f/w performs
 * the configuration.
 */
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
-   if (!caam_dpaa2)
+   np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+   ctrlpriv->mc_en = !!np;
+   of_node_put(np);
+
+   if (!ctrlpriv->mc_en)
clrsetbits_32(>mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
  MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
  MCFGR_WDENABLE | MCFGR_LARGE_BURST |
@@ -733,9 +737,9 @@ static int caam_probe(struct platform_device *pdev)
/*
 * If SEC has RNG version >= 4 and RNG state handle has not been
 * already instantiated, do RNG instantiation
-* In case of DPAA 2.x, RNG is managed by MC firmware.
+* In case of SoCs with Management Complex, RNG is managed by MC f/w.
 */
-   if (!caam_dpaa2 &&
+   if (!ctrlpriv->mc_en &&
(cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(>r4tst[0].rdsta);
@@ -804,9 +808,8 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
 ctrlpriv->era);
-   dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
-ctrlpriv->total_jobrs, ctrlpriv->qi_present,
-caam_dpaa2 ? "yes" : "no");
+   dev_info(dev, "job rings = %d, qi = %d\n",
+ctrlpriv->total_jobrs, ctrlpriv->qi_present);
 
 #ifdef CONFIG_DEBUG_FS
debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 7696a774a362..babc78abd155 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@ struct caam_drv_private {
 */
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present;  /* Nonzero if QI present in device */
+   u8 mc_en;   /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en;/* Virtualization enabled in CAAM */
int era;/* CAAM Era (internal HW revision) */
-- 
2.16.2



[PATCH 2/3] crypto: caam - fix rfc4543 descriptors

2018-05-23 Thread Horia Geantă
In some cases the CCB DMA-based internal transfer started by the MOVE
command (src=M3 register, dst=descriptor buffer) does not finish
in time and DECO executes the unpatched descriptor.
This leads eventually to a DECO Watchdog Timer timeout error.

To make sure the transfer ends, change the MOVE command to be blocking.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg_desc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index 8ae7a1be7dfd..a408edd84f34 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1093,7 +1093,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct 
alginfo *cdata,
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
(0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
-(0x8 << MOVE_LEN_SHIFT));
+(0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
 
/* Will read assoclen + cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1178,7 +1178,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct 
alginfo *cdata,
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
(0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
-(0x8 << MOVE_LEN_SHIFT));
+(0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
 
/* Will read assoclen + cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-- 
2.16.2



[PATCH 4.9.y, 4.14.y] crypto: talitos - fix IPsec cipher in length

2018-05-03 Thread Horia Geantă
commit 2b1227301a8e4729409694e323b72c064c47cb6b upstream.

For SEC 2.x+, cipher in length must contain only the ciphertext length.
In case of using hardware ICV checking, the ICV length is provided via
the "extent" field of the descriptor pointer.

Cc: <sta...@vger.kernel.org> # 4.8+
Fixes: 549bd8bc5987 ("crypto: talitos - Implement AEAD for SEC1 using 
HMAC_SNOOP_NO_AFEU")
Reported-by: Horia Geantă <horia.gea...@nxp.com>
Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
Tested-by: Horia Geantă <horia.gea...@nxp.com>
Signed-off-by: Herbert Xu <herb...@gondor.apana.org.au>
[backported to 4.9.y, 4.14.y]
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
The upstream fix has already been applied "as is" on 4.15.y.
Submitting this backport since it failed to apply on 4.9.y and 4.14.y.

 drivers/crypto/talitos.c | 41 +
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 42c060c7ae15..7c71722be395 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1116,10 +1116,10 @@ static int sg_to_link_tbl_offset(struct scatterlist 
*sg, int sg_count,
return count;
 }
 
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
-  unsigned int len, struct talitos_edesc *edesc,
-  struct talitos_ptr *ptr,
-  int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen)
 {
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1130,7 +1130,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist 
*src,
}
 
to_talitos_ptr_len(ptr, len, is_sec1);
-   to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+   to_talitos_ptr_ext_set(ptr, elen, is_sec1);
 
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
@@ -1140,7 +1140,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist 
*src,
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
return sg_count;
}
-   sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+   sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
 >link_tbl[tbl_off]);
if (sg_count == 1) {
/* Only one segment now, so no link tbl needed*/
@@ -1154,6 +1154,15 @@ int talitos_sg_map(struct device *dev, struct 
scatterlist *src,
return sg_count;
 }
 
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off)
+{
+   return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+ tbl_off, 0);
+}
+
 /*
  * fill in and submit ipsec_esp descriptor
  */
@@ -1171,7 +1180,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct 
aead_request *areq,
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
-   int sg_link_tbl_len;
+   int elen = 0;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1225,20 +1234,12 @@ static int ipsec_esp(struct talitos_edesc *edesc, 
struct aead_request *areq,
 * extent is bytes of HMAC postpended to ciphertext,
 * typically 12 for ipsec
 */
-   to_talitos_ptr_len(>ptr[4], cryptlen, is_sec1);
-   to_talitos_ptr_ext_set(>ptr[4], 0, is_sec1);
-
-   sg_link_tbl_len = cryptlen;
-
-   if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
-   to_talitos_ptr_ext_set(>ptr[4], authsize, is_sec1);
-
-   if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
-   sg_link_tbl_len += authsize;
-   }
+   if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+   (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+   elen = authsize;
 
-   ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
->ptr[4], sg_count, areq->assoclen, tbl_off);
+   ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, >ptr[4],
+sg_count, areq->assoclen, tbl_off, elen);
 
if (ret > 1) {
tbl_off += ret;
-- 
2.16.2



[PATCH] crypto: caam - fix size of RSA prime factor q

2018-04-27 Thread Horia Geantă
Fix a typo where size of RSA prime factor q is using the size of
prime factor p.

Cc: <sta...@vger.kernel.org> # 4.13+
Fixes: 52e26d77b8b3 ("crypto: caam - add support for RSA key form 2")
Fixes: 4a651b122adb ("crypto: caam - add support for RSA key form 3")
Reported-by: David Binderman <dcb...@hotmail.com>
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caampkc.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 6f990139f324..578ea63a3109 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct 
rsa_edesc *edesc,
struct caam_rsa_key *key = >key;
struct rsa_priv_f2_pdb *pdb = >pdb.priv_f2;
size_t p_sz = key->p_sz;
-   size_t q_sz = key->p_sz;
+   size_t q_sz = key->q_sz;
 
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct 
rsa_edesc *edesc,
struct caam_rsa_key *key = >key;
struct rsa_priv_f3_pdb *pdb = >pdb.priv_f3;
size_t p_sz = key->p_sz;
-   size_t q_sz = key->p_sz;
+   size_t q_sz = key->q_sz;
 
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
@@ -397,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
struct rsa_priv_f2_pdb *pdb = >pdb.priv_f2;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
-   size_t q_sz = key->p_sz;
+   size_t q_sz = key->q_sz;
 
pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->d_dma)) {
@@ -472,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
struct rsa_priv_f3_pdb *pdb = >pdb.priv_f3;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
-   size_t q_sz = key->p_sz;
+   size_t q_sz = key->q_sz;
 
pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->p_dma)) {
-- 
2.16.2



Re: [PATCH v2] crypto: caam: - Use kmemdup() function

2018-04-16 Thread Horia Geantă
On 4/16/2018 7:07 PM, Fabio Estevam wrote:
> From: Fabio Estevam <fabio.este...@nxp.com>
> 
> Use kmemdup() rather than duplicating its implementation.
> 
> By using kmemdup() we can also get rid of the 'val' variable.
> 
> Detected with Coccinelle script.
> 
> Signed-off-by: Fabio Estevam <fabio.este...@nxp.com>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Thanks,
Horia


Re: [PATCH] crypto: caam: - Use kmemdup() function

2018-04-16 Thread Horia Geantă
On 4/16/2018 5:08 PM, Fabio Estevam wrote:
> From: Fabio Estevam 
> 
> Use kmemdup() rather than duplicating its implementation.
> 
> Detected with Coccinelle script.
> 
> Signed-off-by: Fabio Estevam 
> ---
>  drivers/crypto/caam/caampkc.c | 4 +---
>  1 file changed, 1 insertion(+), 3 deletions(-)
> 
> diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
> index 979072b..c3518ce 100644
> --- a/drivers/crypto/caam/caampkc.c
> +++ b/drivers/crypto/caam/caampkc.c
> @@ -789,12 +789,10 @@ static inline u8 *caam_read_raw_data(const u8 *buf, 
> size_t *nbytes)
>   if (!*nbytes)
>   return NULL;
>  
> - val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
> + val = kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
>   if (!val)
>   return NULL;
>  
> - memcpy(val, buf, *nbytes);
> -
>   return val;
>  }
Could be further simplified:
return kmemdup(...);
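
i.e. something along these lines (a sketch, assuming the helper's existing
leading-zero handling above the quoted hunk is kept as is):

static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
        /* skip leading zero octets, as before */
        caam_rsa_drop_leading_zeros(&buf, nbytes);
        if (!*nbytes)
                return NULL;

        /* duplicate the remaining bytes and return the copy directly */
        return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}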

Horia


Re: [PATCH v3] crypto: caam: Drop leading zero from input buffer

2018-04-16 Thread Horia Geantă
On 4/15/2018 6:51 PM, Martin Townsend wrote:
> On Sun, Apr 15, 2018 at 3:12 PM, Fabio Estevam  wrote:
>> From: Fabio Estevam 
>>
>> imx6ul and imx7 report the following error:
>>
>> caam_jr 2142000.jr1: 4789: DECO: desc idx 7:
>> Protocol Size Error - A protocol has seen an error in size. When
>> running RSA, pdb size N < (size of F) when no formatting is used; or
>> pdb size N < (F + 11) when formatting is used.
>>
>> [ cut here ]
>> WARNING: CPU: 0 PID: 1 at crypto/asymmetric_keys/public_key.c:148
>> public_key_verify_signature+0x27c/0x2b0
>>
>> This error happens because the signature contains 257 bytes, including
>> a leading zero as the first element.
>>
>> Fix the problem by stripping off the leading zero from input data
>> before feeding it to the CAAM accelerator.
>>
>> Fixes: 8c419778ab57e497b5 ("crypto: caam - add support for RSA algorithm")
>> Cc: 
>> Reported-by: Martin Townsend 
>> Signed-off-by: Fabio Estevam 
>> ---
>> Changes since v2:
>> - Check if the length is zero after calling caam_rsa_drop_leading_zeros()
>>
>>  drivers/crypto/caam/caampkc.c | 45 
>> +++
>>  1 file changed, 37 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
>> index 7a897209..47467ff 100644
>> --- a/drivers/crypto/caam/caampkc.c
>> +++ b/drivers/crypto/caam/caampkc.c
>> @@ -166,6 +166,14 @@ static void rsa_priv_f3_done(struct device *dev, u32 
>> *desc, u32 err,
>> akcipher_request_complete(req, err);
>>  }
>>
>> +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
>> +{
>> +   while (!**ptr && *nbytes) {
>> +   (*ptr)++;
>> +   (*nbytes)--;
>> +   }
>> +}
>> +
>>  static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
>>  size_t desclen)
>>  {
>> @@ -178,7 +186,36 @@ static struct rsa_edesc *rsa_edesc_alloc(struct 
>> akcipher_request *req,
>> int sgc;
>> int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
>> int src_nents, dst_nents;
>> +   const u8 *temp;
>> +   void *buffer;
>> +   size_t len;
>> +
>> +   buffer = kzalloc(req->src_len, GFP_ATOMIC);
>> +   if (!buffer)
>> +   return ERR_PTR(-ENOMEM);
>> +
>> +   sg_copy_to_buffer(req->src, sg_nents(req->src),
>> + buffer, req->src_len);
>> +   temp = (u8 *)buffer;
>> +   len = req->src_len;
>>
>> +   /*
>> +* Check if the buffer contains leading zeros and if
>> +* it does, drop the leading zeros
>> +*/
>> +   if (temp[0] == 0) {
>> +   caam_rsa_drop_leading_zeros(, );
>> +   if (!len) {
>> +   kfree(buffer);
>> +   return ERR_PTR(-ENOMEM);
>> +   }
>> +
>> +   req->src_len = len;
>> +   sg_copy_from_buffer(req->src, sg_nents(req->src),
>> +   (void *)temp, req->src_len);
>> +   }
>> +
>> +   kfree(buffer);
>> src_nents = sg_nents_for_len(req->src, req->src_len);
>> dst_nents = sg_nents_for_len(req->dst, req->dst_len);
>>
>> @@ -683,14 +720,6 @@ static void caam_rsa_free_key(struct caam_rsa_key *key)
>> memset(key, 0, sizeof(*key));
>>  }
>>
>> -static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
>> -{
>> -   while (!**ptr && *nbytes) {
>> -   (*ptr)++;
>> -   (*nbytes)--;
>> -   }
>> -}
>> -
>>  /**
>>   * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
>>   * dP, dQ and qInv could decode to less than corresponding p, q length, as 
>> the
>> --
>> 2.7.4
>>
> 
> Hi Fabio,
> 
> just tried it and it works fine on my i.MX6UL board running linux-imx
> 4.9 (had to manually apply the patch as it doesn't have
> caam_rsa_drop_leading_zeros) so a
> Reviewed and Tested-By Martin Townsend
> 
I've sent a fix that does not copy data back and forth; instead it counts the
leading zeros and fast-forwards the S/G list.
Please check it works in your case.

Thanks,
Horia


[PATCH RESEND] crypto: caam - strip input zeros from RSA input buffer

2018-04-16 Thread Horia Geantă
Sometimes the provided RSA input buffer is not stripped
of leading zeros. This could cause its size to be bigger than that
of the modulus, making the HW complain:

caam_jr 2142000.jr1: 4789: DECO: desc idx 7:
Protocol Size Error - A protocol has seen an error in size. When
running RSA, pdb size N < (size of F) when no formatting is used; or
pdb size N < (F + 11) when formatting is used.

Fix the problem by stripping off the leading zero from input data
before feeding it to the CAAM accelerator.

Fixes: 8c419778ab57e ("crypto: caam - add support for RSA algorithm")
Cc: <sta...@vger.kernel.org> # 4.8+
Reported-by: Martin Townsend <mtownsend1...@gmail.com>
Link: 
https://lkml.kernel.org/r/cabatt_ytyoryktapcb4izhnanekkgfi9xaqmjhi_n-8ywoc...@mail.gmail.com
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
(Hopefully this one will reach the mailing list.
Sorry for the noise, problems with SMTP.)

 drivers/crypto/caam/caampkc.c | 54 +++
 drivers/crypto/caam/caampkc.h |  8 +++
 2 files changed, 62 insertions(+)

diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 7a897209f181..979072b25eaa 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 
*desc, u32 err,
akcipher_request_complete(req, err);
 }
 
+static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+   unsigned int nbytes,
+   unsigned int flags)
+{
+   struct sg_mapping_iter miter;
+   int lzeros, ents;
+   unsigned int len;
+   unsigned int tbytes = nbytes;
+   const u8 *buff;
+
+   ents = sg_nents_for_len(sgl, nbytes);
+   if (ents < 0)
+   return ents;
+
+   sg_miter_start(, sgl, ents, SG_MITER_FROM_SG | flags);
+
+   lzeros = 0;
+   len = 0;
+   while (nbytes > 0) {
+   while (len && !*buff) {
+   lzeros++;
+   len--;
+   buff++;
+   }
+
+   if (len && *buff)
+   break;
+
+   sg_miter_next();
+   buff = miter.addr;
+   len = miter.length;
+
+   nbytes -= lzeros;
+   lzeros = 0;
+   }
+
+   miter.consumed = lzeros;
+   sg_miter_stop();
+   nbytes -= lzeros;
+
+   return tbytes - nbytes;
+}
+
 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 size_t desclen)
 {
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
+   struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
   GFP_KERNEL : GFP_ATOMIC;
+   int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+   int lzeros;
+
+   lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
+   if (lzeros < 0)
+   return ERR_PTR(lzeros);
+
+   req->src_len -= lzeros;
+   req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
 
src_nents = sg_nents_for_len(req->src, req->src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
@@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
.max_size = caam_rsa_max_size,
.init = caam_rsa_init_tfm,
.exit = caam_rsa_exit_tfm,
+   .reqsize = sizeof(struct caam_rsa_req_ctx),
.base = {
.cra_name = "rsa",
.cra_driver_name = "rsa-caam",
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index fd145c46eae1..82645bcf8b27 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -95,6 +95,14 @@ struct caam_rsa_ctx {
struct device *dev;
 };
 
+/**
+ * caam_rsa_req_ctx - per request context.
+ * @src: input scatterlist (stripped of leading zeros)
+ */
+struct caam_rsa_req_ctx {
+   struct scatterlist src[2];
+};
+
 /**
  * rsa_edesc - s/w-extended rsa descriptor
  * @src_nents : number of segments in input scatterlist
-- 
2.16.2



Re: CAAM and IMA/EVM : caam_rsa_enc: DECO: desc idx 7: Protocol Size Error

2018-04-13 Thread Horia Geantă
On 4/13/2018 3:12 AM, Fabio Estevam wrote:
> Hi Horia,
> 
> On Thu, Apr 12, 2018 at 4:12 AM, Horia Geantă <horia.gea...@nxp.com> wrote:
> 
>> Yes, driver needs to strip off leading zeros from input data before feeding 
>> it
>> to the accelerator.
>> I am working at a fix.
> 
> I was able to to strip off the leading zeros from input data as you suggested.
> 
> My changes are like this at the moment:
> 
[snip]
> but still get the original error as shown below.
> 
> Any ideas?
> 
Stripping should happen before set_rsa_pub_pdb() is called since the Protocol
Data Block contains the input length that is used by the accelerator:
pdb->f_len = req->src_len;

It should probably be moved to the top of rsa_edesc_alloc().

Ideally stripping would avoid copying data (and memory allocation for temporary
buffers).

Horia


Re: CAAM and IMA/EVM : caam_rsa_enc: DECO: desc idx 7: Protocol Size Error

2018-04-12 Thread Horia Geantă
On 4/11/2018 8:26 PM, Fabio Estevam wrote:
> Hi Horia,
> 
> On Wed, Apr 11, 2018 at 7:15 AM, Horia Geantă <horia.gea...@nxp.com> wrote:
> 
>> You'd want to make sure rsa is offloaded to caam in this case - check in
>> /proc/crypto.
>> IIRC there are some i.mx parts that don't have support for Public Key
>> acceleration (PKHA).
> 
> PKHA is present on mx6ul and not present on mx6q.
> 
> mx6uq uses the generic rsa driver and handles the certificate correctly.
> 
> mx6ul uses pkcs1pad(rsa-caam,sha256) and it fails to handle the certificate.
> 
> So that explains the different behavior of mx6q versus mx6ul.
> 
Thanks for confirming my guess.

> Any ideas as to how to fix rsa-caam?
> 
Yes, driver needs to strip off leading zeros from input data before feeding it
to the accelerator.
I am working at a fix.

Horia


Re: [PATCH v4 2/2] crypto: caam - allow retrieving 'era' from register

2018-04-11 Thread Horia Geantă
On 4/11/2018 3:45 PM, Fabio Estevam wrote:
> From: Fabio Estevam <fabio.este...@nxp.com>
> 
> The 'era' information can be retrieved from CAAM registers, so
> introduce a caam_get_era_from_hw() function that gets it via register
> reads in case the 'fsl,sec-era' property is not passed in the device
> tree.
> 
> This function is based on the U-Boot implementation from
> drivers/crypto/fsl/sec.c
> 
> Signed-off-by: Fabio Estevam <fabio.este...@nxp.com>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>
for the series.

Thanks,
Horia


Re: [PATCH v3 2/2] crypto: caam - allow retrieving 'era' from register

2018-04-11 Thread Horia Geantă
On 4/11/2018 2:59 PM, Fabio Estevam wrote:
> From: Fabio Estevam 
> 
> The 'era' information can be retrieved from CAAM registers, so
> introduce a caam_get_era_from_hw() function that gets it via register
> reads in case the 'fsl,sec-era' property is not passed in the device
> tree.
> 
> This function is based on the U-Boot implementation from
> drivers/crypto/fsl/sec.c
> 
> Signed-off-by: Fabio Estevam 
> ---
> Changes since v2:
> - Use a local struct for the CAAM era ID
> - Only read caam_id_ms only if ccb_id register does not provide era
> - Improve comment on caam_get_era()
> - Return -ENOTSUPP to keep old behavior
> - Put the defines before the relevant fields
> 
>  drivers/crypto/caam/ctrl.c | 56 
> ++
>  drivers/crypto/caam/regs.h |  6 +
>  2 files changed, 58 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
> index bee690a..ab67e97 100644
> --- a/drivers/crypto/caam/ctrl.c
> +++ b/drivers/crypto/caam/ctrl.c
> @@ -396,11 +396,56 @@ static void kick_trng(struct platform_device *pdev, int 
> ent_delay)
>   clrsetbits_32(>rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
>  }
>  
> +static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
> +{
> + static const struct {
> + u16 ip_id;
> + u8 maj_rev;
> + u8 era;
> + } id[] = {
> + {0x0A10, 1, 1},
> + {0x0A10, 2, 2},
> + {0x0A12, 1, 3},
> + {0x0A14, 1, 3},
> + {0x0A14, 2, 4},
> + {0x0A16, 1, 4},
> + {0x0A10, 3, 4},
> + {0x0A11, 1, 4},
> + {0x0A18, 1, 4},
> + {0x0A11, 2, 5},
> + {0x0A12, 2, 5},
> + {0x0A13, 1, 5},
> + {0x0A1C, 1, 5}
> + };
> + u32 ccbvid, id_ms;
> + u8 maj_rev,era;
Nitpick: ^^^ space needed

Please run checkpatch before submitting.

Thanks,
Horia


Re: CAAM and IMA/EVM : caam_rsa_enc: DECO: desc idx 7: Protocol Size Error

2018-04-11 Thread Horia Geantă
On 4/11/2018 1:36 AM, James Bottomley wrote:
> On Tue, 2018-04-10 at 23:01 +0100, Martin Townsend wrote:
>> Using openssl to get the signature in my x509 cert
>>
>>    Signature Algorithm: sha256WithRSAEncryption
>>  68:82:cc:5d:f9:ee:fb:1a:77:72:a6:a9:c6:4c:cc:d7:f6:2a:
>>  17:a5:db:bf:5a:2b:8d:39:60:dc:a0:93:39:45:0f:bc:a7:e8:
>>  7f:6c:06:84:2d:f3:c1:94:0a:60:56:1c:50:78:dc:34:d1:87:
>>
>> So there's an extra 0x00 and the signature is 257 bytes so I guess
>> this is upsetting CAAM, just need to work out where it's coming from,
>> or whether it's valid and CAAM should be handling it.
> 
> A signature is just a bignum so leading zeros are permitted because
> it's the same numeric value; however, there are normalization
> procedures that require stripping the leading zeros, say before doing a
> hash or other operation which would be affected by them.
> 
> CAAM should definitely handle it on the "be liberal in what you accept"
>  principle.  The kernel should probably remove the leading zeros on the
> "be conservative in what you do" part of the same principle. 
> 
Looking at the generic SW implementation (crypto/rsa.c, rsa_verify()), leading
zeros are removed:
s = mpi_read_raw_from_sgl(req->src, req->src_len);

CAAM implementation of rsa is not doing this (though it is removing leading
zeros when reading public, private keys).
It has to be fixed. Thanks for the report.
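
For a flat buffer the normalization is trivial - a sketch (the scatterlist
input used by the driver needs a bit more care):

static const u8 *rsa_strip_leading_zeros(const u8 *buf, size_t *len)
{
        /* leading zero octets do not change the big-endian integer value */
        while (*len && !*buf) {
                buf++;
                (*len)--;
        }

        return buf;
}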

>>   I notice that in my stack trace I have pkcs1pad_verify which
>> suggests some sort of padding?
> 
> Yes, RSA has various forms of padding because the information being
> encrypted is usually much smaller than the encryption unit; PKCS1 is
> the most common (although its now deprecated in favour of OAEP because
> of all the padding oracle problems).
> 
RSA padding has been intentionally added as a template, wrapping "textbook"
(raw) RSA primitives.
For PKCS#1 v1.5, a template instantiation is called pkcs1pad(rsa, hash_alg).

Currently in kernel the only supported RSA padding scheme is PKCS#1 v1.5.
When implemented, another scheme - for e.g. OAEP - would be added in a similar
way, as a template: oaep(rsa, ...).
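
For reference, a user reaches the padded transform through the template name -
a sketch only (priority resolution picks e.g. rsa-caam underneath when it is
available):

#include <crypto/akcipher.h>

static struct crypto_akcipher *alloc_padded_rsa(void)
{
        /* instantiates pkcs1pad() around whichever "rsa" implementation wins */
        return crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
}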

Horia


Re: CAAM and IMA/EVM : caam_rsa_enc: DECO: desc idx 7: Protocol Size Error

2018-04-11 Thread Horia Geantă
On 4/11/2018 5:21 AM, Fabio Estevam wrote:
> Hi Martin,
> 
> On Tue, Apr 10, 2018 at 7:01 PM, Martin Townsend
>  wrote:
> 
>> A hexdump of the signature reveals a 0x00 at the start
> 
> Yes, same is happening here on my mx6ul evk running linux-next:
> 
[snip]
> 
> However, the same kernel running on a mx6 board does not give the
> "Protocol Size Error":
> 
You'd want to make sure rsa is offloaded to caam in this case - check in
/proc/crypto.
IIRC there are some i.mx parts that don't have support for Public Key
acceleration (PKHA).

Horia


Re: [PATCH v2 2/2] crypto: caam - allow retrieving 'era' from register

2018-04-11 Thread Horia Geantă
On 4/11/2018 4:54 AM, Fabio Estevam wrote:
> From: Fabio Estevam 
> 
> The 'era' information can be retrieved from CAAM registers, so
> introduce a caam_get_era_from_hw() function that gets it via register
> reads in case the 'fsl,sec-era' property is not passed in the device
> tree.
> 
Indeed, "fsl,sec-era" property is marked as optional in DT bindings doc.
This means the previous commit
883619a931e9 ("crypto: caam - fix ERA retrieval function")
should have kept the detection based on registers as a fallback.

Have you actually hit a case where the property was missing from DT?

> This function is based on the U-Boot implementation from
> drivers/crypto/fsl/sec.c
> 
> Signed-off-by: Fabio Estevam 
> ---
> Changes since v1:
> - None. I previously asked to put the linux-crypto list on Cc
> 
>  drivers/crypto/caam/ctrl.c | 45 ++---
>  drivers/crypto/caam/regs.h |  6 ++
>  2 files changed, 48 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
> index bee690a..3f10791 100644
> --- a/drivers/crypto/caam/ctrl.c
> +++ b/drivers/crypto/caam/ctrl.c
> @@ -396,11 +396,47 @@ static void kick_trng(struct platform_device *pdev, int 
> ent_delay)
>   clrsetbits_32(>rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
>  }
>  
> +static u8 caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
> +{
> + const struct sec_vid id[] = {
Make the struct static.

sec_vid should not be used since:
-it defines the layout of SECVID_MS register (ip_id, maj_rev, min_rev)
-while the array below contains (ip_id, maj_rev, era) tuples
You could instead use an anonymous struct, just like in kernel commit
82c2f9607b8a4 or as in u-boot.

> + {0x0A10, 1, 1},
> + {0x0A10, 2, 2},
> + {0x0A12, 1, 3},
> + {0x0A14, 1, 3},
> + {0x0A14, 2, 4},
> + {0x0A16, 1, 4},
> + {0x0A10, 3, 4},
> + {0x0A11, 1, 4},
> + {0x0A18, 1, 4},
> + {0x0A11, 2, 5},
> + {0x0A12, 2, 5},
> + {0x0A13, 1, 5},
> + {0x0A1C, 1, 5},
> + };
> + int i;
> +
> + u32 secvid_ms = rd_reg32(>perfmon.caam_id_ms);
Reading caam_id_ms should be done only if ccbvid does not provide the era, i.e.
should be moved just before the for loop.

> + u32 ccbvid = rd_reg32(>perfmon.ccb_id);
> + u16 ip_id = (secvid_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
> + u8 maj_rev = (secvid_ms & SECVID_MS_MAJ_REV_MASK) >>
> +   SECVID_MS_MAJ_REV_SHIFT;
> + u8 era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
> +
> + if (era)/* This is '0' prior to CAAM ERA-6 */
> + return era;
> +
> + for (i = 0; i < ARRAY_SIZE(id); i++)
> + if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
> + return id[i].min_rev;
> +
> + return 0;
Should return -ENOTSUPP, to keep the semantics of caam_get_era().

> +}
> +
>  /**
>   * caam_get_era() - Return the ERA of the SEC on SoC, based
>   * on "sec-era" propery in the DTS. This property is updated by u-boot.
While here:
_optional_ ^^^ s/propery/property
Should also mention that ERA detection fallback relies on SEC registers (CCBVID
or SECVID).

>   **/
> -static int caam_get_era(void)
> +static int caam_get_era(struct caam_ctrl __iomem *ctrl)
>  {
>   struct device_node *caam_node;
>   int ret;
> @@ -410,7 +446,10 @@ static int caam_get_era(void)
>   ret = of_property_read_u32(caam_node, "fsl,sec-era", );
>   of_node_put(caam_node);
>  
> - return ret ? -ENOTSUPP : prop;
> + if (!ret)
> + return prop;
> + else
> + return caam_get_era_from_hw(ctrl);
>  }
>  
>  static const struct of_device_id caam_match[] = {
> @@ -622,7 +661,7 @@ static int caam_probe(struct platform_device *pdev)
>   goto iounmap_ctrl;
>   }
>  
> - ctrlpriv->era = caam_get_era();
> + ctrlpriv->era = caam_get_era(ctrl);
>  
>   ret = of_platform_populate(nprop, caam_match, NULL, dev);
>   if (ret) {
> diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
> index fee3638..6f96d7b 100644
> --- a/drivers/crypto/caam/regs.h
> +++ b/drivers/crypto/caam/regs.h
> @@ -311,6 +311,12 @@ struct caam_perfmon {
>   u64 rsvd3;
>  
>   /* Component Instantiation Parameters   fe0-fff */
> +#define SECVID_MS_IPID_MASK  0x
> +#define SECVID_MS_IPID_SHIFT 16
> +#define SECVID_MS_MAJ_REV_MASK   0xff00
> +#define SECVID_MS_MAJ_REV_SHIFT  8
> +#define CCBVID_ERA_MASK  0xff00
> +#define CCBVID_ERA_SHIFT 24
Please add the defines in front of each register:
-SECVID_* before caam_id_ms
-CCBVID_* before ccb_id

>   u32 rtic_id;/* RVID - RTIC Version ID   */
>   u32 ccb_id; /* CCBVID - CCB Version ID  */
>   u32 

Re: [PATCH v2 1/2] crypto: caam - staticize caam_get_era()

2018-04-11 Thread Horia Geantă
On 4/11/2018 4:54 AM, Fabio Estevam wrote:
> From: Fabio Estevam <fabio.este...@nxp.com>
> 
> caam_get_era() is only used locally, so do not export this function
> and make it static instead.
> 
> Signed-off-by: Fabio Estevam <fabio.este...@nxp.com>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Thanks,
Horia



[PATCH 3/3] crypto: caam/qi - fix IV DMA mapping and updating

2018-03-28 Thread Horia Geantă
There are two IV-related issues:
(1) crypto API does not guarantee to provide an IV buffer that is DMAable,
thus it's incorrect to DMA map it
(2) for in-place decryption, since ciphertext is overwritten with
plaintext, updated IV (req->info) will contain the last block of plaintext
(instead of the last block of ciphertext)

While these two issues could be fixed separately, it's straightforward
to fix both at the same time - by using the {ablkcipher,aead}_edesc
extended descriptor to store the IV that will be fed to the crypto engine;
this allows for fixing (2) by saving req->src[last_block] in req->info
directly, i.e. without allocating yet another temporary buffer.

A side effect of the fix is that it's no longer possible to have the IV
contiguous with req->src or req->dst.
Code checking for this case is removed.
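
The core of the approach, in isolation (a sketch reduced to the ablkcipher
case, not the full patch): the last ciphertext block has to be read out of
req->src before the in-place operation overwrites it, e.g.:

#include <linux/crypto.h>
#include <crypto/scatterwalk.h>

static void save_last_ciphertext_block(struct ablkcipher_request *req,
                                       u8 *iv, unsigned int ivsize)
{
        /* copy the final ivsize bytes of ciphertext out of req->src */
        scatterwalk_map_and_copy(iv, req->src, req->nbytes - ivsize,
                                 ivsize, 0);
}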

Cc: <sta...@vger.kernel.org> # 4.14+
Fixes: a68a19380522 ("crypto: caam/qi - properly set IV after {en,de}crypt")
Link: http://lkml.kernel.org/r/20170113084620.gf22...@gondor.apana.org.au
Reported-by: Gilad Ben-Yossef <gi...@benyossef.com>
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 227 ---
 1 file changed, 116 insertions(+), 111 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c2b5762d56a0..a6b76b3c8abe 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -726,7 +726,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
  * @assoclen: associated data length, in CAAM endianness
  * @assoclen_dma: bus physical mapped address of req->assoclen
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct aead_edesc {
int src_nents;
@@ -737,9 +737,6 @@ struct aead_edesc {
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_AEAD_SG\
-   ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
-sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
 };
 
@@ -751,7 +748,7 @@ struct aead_edesc {
  * @qm_sg_bytes: length of dma mapped h/w link table
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct ablkcipher_edesc {
int src_nents;
@@ -760,9 +757,6 @@ struct ablkcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_ABLKCIPHER_SG  \
-   ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
 };
 
@@ -984,17 +978,8 @@ static struct aead_edesc *aead_edesc_alloc(struct 
aead_request *req,
}
}
 
-   if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+   if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
ivsize = crypto_aead_ivsize(aead);
-   iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-   if (dma_mapping_error(qidev, iv_dma)) {
-   dev_err(qidev, "unable to map IV\n");
-   caam_unmap(qidev, req->src, req->dst, src_nents,
-  dst_nents, 0, 0, op_type, 0, 0);
-   qi_cache_free(edesc);
-   return ERR_PTR(-ENOMEM);
-   }
-   }
 
/*
 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
@@ -1002,16 +987,33 @@ static struct aead_edesc *aead_edesc_alloc(struct 
aead_request *req,
 */
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-   if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-   dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-   qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
-   caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-  iv_dma, ivsize, op_type, 0, 0);
+   sg_table = >sgt[0];
+   qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+   if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+CAAM_QI_MEMCACHE_SIZE)) {
+   dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+   qm_sg_ents, ivsize);
+   caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+  0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
   

[PATCH 2/3] crypto: caam - fix IV DMA mapping and updating

2018-03-28 Thread Horia Geantă
There are two IV-related issues:
(1) crypto API does not guarantee to provide an IV buffer that is DMAable,
thus it's incorrect to DMA map it
(2) for in-place decryption, since ciphertext is overwritten with
plaintext, updated req->info will contain the last block of plaintext
(instead of the last block of ciphertext)

While these two issues could be fixed separately, it's straightforward
to fix both at the same time - by allocating extra space in the
ablkcipher_edesc for the IV that will be fed to the crypto engine;
this allows for fixing (2) by saving req->src[last_block] in req->info
directly, i.e. without allocating another temporary buffer.

A side effect of the fix is that it's no longer possible to have the IV
and req->src contiguous. Code checking for this case is removed.

Cc: <sta...@vger.kernel.org> # 4.13+
Fixes: 854b06f76879 ("crypto: caam - properly set IV after {en,de}crypt")
Link: http://lkml.kernel.org/r/20170113084620.gf22...@gondor.apana.org.au
Reported-by: Gilad Ben-Yossef <gi...@benyossef.com>
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg.c | 212 ++
 1 file changed, 91 insertions(+), 121 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a9a4df7012e2..0c3b19e8cd6b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -772,6 +772,7 @@ struct aead_edesc {
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *  and IV
  */
 struct ablkcipher_edesc {
int src_nents;
@@ -913,6 +914,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, 
u32 *desc, u32 err,
scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
 ivsize, 0);
 
+   /* In case initial IV was generated, copy it in GIVCIPHER request */
+   if (edesc->iv_dir == DMA_FROM_DEVICE) {
+   u8 *iv;
+   struct skcipher_givcrypt_request *greq;
+
+   greq = container_of(req, struct skcipher_givcrypt_request,
+   creq);
+   iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+edesc->sec4_sg_bytes;
+   memcpy(greq->giv, iv, ivsize);
+   }
+
kfree(edesc);
 
ablkcipher_request_complete(req, err);
@@ -923,10 +936,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, 
u32 *desc, u32 err,
 {
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
-#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -944,14 +957,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, 
u32 *desc, u32 err,
 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 
ablkcipher_unmap(jrdev, edesc, req);
-
-   /*
-* The crypto API expects us to set the IV (req->info) to the last
-* ciphertext block.
-*/
-   scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
-ivsize, 0);
-
kfree(edesc);
 
ablkcipher_request_complete(req, err);
@@ -1100,15 +1105,14 @@ static void init_authenc_job(struct aead_request *req,
  */
 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
-   struct ablkcipher_request *req,
-   bool iv_contig)
+   struct ablkcipher_request *req)
 {
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
-   u32 out_options = 0, in_options;
-   dma_addr_t dst_dma, src_dma;
-   int len, sec4_sg_index = 0;
+   u32 out_options = 0;
+   dma_addr_t dst_dma;
+   int len;
 
 #ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1124,30 +1128,18 @@ static void init_ablkcipher_job(u32 *sh_desc, 
dma_addr_t ptr,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-   if (iv_contig) {
-   src_dma = edesc->iv_dma;
-   in_options = 0;
-   } else {
-   src_dma = edesc->sec4_sg_dma;
-   sec4_sg_index += edesc->src_nents + 1;
-   in_options = LDST_SGF;
-   }
-   append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+   append_se

[PATCH 0/3] crypto: caam - IV-related fixes

2018-03-28 Thread Horia Geantă
This patch set fixes several issues related to IV handling:
-in some cases IV direction is incorrect
-IVs need to be DMA mapped (when they are not provided directly
as "immediate" values in the descriptors); however, crypto API does not
guarantee the IV buffers to be DMAable
-in-place ablkcipher decryption needs special handling since ciphertext
will be overwritten by the time we want to update the IV with the last
ciphertext block

Horia Geantă (3):
  crypto: caam - fix DMA mapping dir for generated IV
  crypto: caam - fix IV DMA mapping and updating
  crypto: caam/qi - fix IV DMA mapping and updating

 drivers/crypto/caam/caamalg.c| 231 +--
 drivers/crypto/caam/caamalg_qi.c | 227 +++---
 2 files changed, 219 insertions(+), 239 deletions(-)

-- 
2.16.2



[PATCH 1/3] crypto: caam - fix DMA mapping dir for generated IV

2018-03-28 Thread Horia Geantă
In case of GIVCIPHER, IV is generated by the device.
Fix the DMA mapping direction.
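
In other words (sketch only, with the decision reduced to a flag):

#include <linux/dma-mapping.h>
#include <linux/types.h>

static dma_addr_t map_iv(struct device *dev, u8 *iv, unsigned int ivsize,
                         bool hw_generates_iv)
{
        /* a device-generated IV is read back by the CPU -> DMA_FROM_DEVICE */
        enum dma_data_direction dir = hw_generates_iv ? DMA_FROM_DEVICE
                                                      : DMA_TO_DEVICE;

        return dma_map_single(dev, iv, ivsize, dir);
}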

Cc: <sta...@vger.kernel.org> # 3.19+
Fixes: 7222d1a34103 ("crypto: caam - add support for givencrypt cbc(aes) and 
rfc3686(ctr(aes))")
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg.c | 29 +
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 584a6c183548..a9a4df7012e2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -767,6 +767,7 @@ struct aead_edesc {
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
@@ -776,6 +777,7 @@ struct ablkcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
+   enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -785,7 +787,8 @@ struct ablkcipher_edesc {
 static void caam_unmap(struct device *dev, struct scatterlist *src,
   struct scatterlist *dst, int src_nents,
   int dst_nents,
-  dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+  dma_addr_t iv_dma, int ivsize,
+  enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
   int sec4_sg_bytes)
 {
if (dst != src) {
@@ -797,7 +800,7 @@ static void caam_unmap(struct device *dev, struct 
scatterlist *src,
}
 
if (iv_dma)
-   dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+   dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 DMA_TO_DEVICE);
@@ -808,7 +811,7 @@ static void aead_unmap(struct device *dev,
   struct aead_request *req)
 {
caam_unmap(dev, req->src, req->dst,
-  edesc->src_nents, edesc->dst_nents, 0, 0,
+  edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -821,7 +824,7 @@ static void ablkcipher_unmap(struct device *dev,
 
caam_unmap(dev, req->src, req->dst,
   edesc->src_nents, edesc->dst_nents,
-  edesc->iv_dma, ivsize,
+  edesc->iv_dma, ivsize, edesc->iv_dir,
   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -1285,7 +1288,7 @@ static struct aead_edesc *aead_edesc_alloc(struct 
aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-  0, 0, 0);
+  0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
 
@@ -1548,7 +1551,7 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-  0, 0, 0);
+  0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
 
@@ -1570,7 +1573,7 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-  iv_dma, ivsize, 0, 0);
+  iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
return ERR_PTR(-ENOMEM);
}
 
@@ -1579,6 +1582,7 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 desc_bytes;
+   edesc->iv_dir = DMA_TO_DEVICE;
 
if (!in_contig) {
dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
@@ -1596,7 +1600,7 @@ static struct ablkcipher_edesc 
*ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-  iv_dma, ivsize

Re: [PATCH v2 3/9] crypto: caam - don't leak pointers to authenc keys

2018-03-26 Thread Horia Geantă
On 3/23/2018 12:42 PM, Tudor Ambarus wrote:
> In caam's aead_setkey we save pointers to the authenc keys in a
> local variable of type struct crypto_authenc_keys and we don't
> zeroize it after use. Fix this and don't leak pointers to the
> authenc keys.
> 
> Signed-off-by: Tudor Ambarus <tudor.amba...@microchip.com>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Thanks,
Horia


Re: [PATCH v2 4/9] crypto: caam/qi - don't leak pointers to authenc keys

2018-03-26 Thread Horia Geantă
On 3/23/2018 12:42 PM, Tudor Ambarus wrote:
> In caam/qi's aead_setkey we save pointers to the authenc keys in
> a local variable of type struct crypto_authenc_keys and we don't
> zeroize it after use. Fix this and don't leak pointers to the
> authenc keys.
> 
> Signed-off-by: Tudor Ambarus <tudor.amba...@microchip.com>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Thanks,
Horia


Re: [PATCH] crypto: talitos - fix IPsec cipher in length

2018-03-23 Thread Horia Geantă
On 3/22/2018 12:04 PM, Christophe LEROY wrote:
> On 16/03/2018 at 15:07, Horia Geantă wrote:
>> On 3/16/2018 2:42 PM, Christophe LEROY wrote:
>>> On 16/03/2018 at 09:48, Horia Geantă wrote:
>>>> For SEC 2.x+, cipher in length must contain only the ciphertext length.
>>>> In case of using hardware ICV checking, the ICV length is provided via
>>>> the "extent" field of the descriptor pointer.
>>>>
>>>> Cc: <sta...@vger.kernel.org> # 4.8+
>>>> Fixes: 549bd8bc5987 ("crypto: talitos - Implement AEAD for SEC1 using 
>>>> HMAC_SNOOP_NO_AFEU")
>>>
>>> It looks like the issue comes more from commit fbb22137c4d9b ("crypto:
>>> talitos - fix use of sg_link_tbl_len"), doesn't it ?
>>>
>> No, the first commit that breaks IPsec for SEC 2.x+ is the one I mentioned.
> 
> Today without your patch, IPsec works well on my mpc8321E. It was broken
> by 549bd8bc5987 and fixed by fbb22137c4d9b.
> But it seems the fix is not complete as it doesn't work yet in your case.
> 
Ok, I checked closer.
It seems the "extent" field is required starting with SEC 3.x, that's why I am
seeing the failure (I am testing on P2020, P1022).
In your case it's working since MPC8321E has a SEC 2.x.

> I have proposed a v2 version of your patch which takes it into 
> talitos_sg_map() hence avoiding direct access to ptr[4] without using 
> the helpers.
> 
Thanks.

Horia


Re: [PATCH v2] crypto: doc - clarify hash callbacks state machine

2018-03-20 Thread Horia Geantă
On 3/20/2018 10:50 AM, Kamil Konieczny wrote:
> On 20.03.2018 08:56, Horia Geantă wrote:
>> Add a note that it is perfectly legal to "abandon" a request object:
>> - call .init() and then (as many times) .update()
>> - _not_ call any of .final(), .finup() or .export() at any point in
>>   future
>>
>> Link: https://lkml.kernel.org/r/20180222114741.ga27...@gondor.apana.org.au
>> Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
>> ---
>>  Documentation/crypto/devel-algos.rst | 8 
>>  1 file changed, 8 insertions(+)
>>
>> diff --git a/Documentation/crypto/devel-algos.rst 
>> b/Documentation/crypto/devel-algos.rst
>> index 66f50d32dcec..c45c6f400dbd 100644
>> --- a/Documentation/crypto/devel-algos.rst
>> +++ b/Documentation/crypto/devel-algos.rst
>> @@ -236,6 +236,14 @@ when used from another part of the kernel.
>> |
>> '---> HASH2
>>  
>> +Note that it is perfectly legal to "abandon" a request object:
>> +- call .init() and then (as many times) .update()
>> +- _not_ call any of .final(), .finup() or .export() at any point in future
>> +
>> +In other words implementations should mind the resource allocation and 
>> clean-up.
>> +No resources related to request objects should remain allocated after a call
> -- 
>> +to .init() or .update(), since there might be no chance to free them.
> 
> is it for crypto api  users or for drivers ?
> 
For drivers / providers (below crypto API).

> the creator of request context is responsible for alloc and destroy,
> so why there are no chance of free ?
> 
Hash request object (including request context) is allocated by the user /
client by means of ahash_request_alloc(), and later on freed using
ahash_request_free().
I don't see a problem with this.

However, besides the memory allocated for the request context, other resources
(related to the request) might be needed by the driver.
I provided an example of needing to DMA map a buffer (to load/store HW state
from/to crypto engine), and I am not happy with either solutions:
-DMA map & unmap after each .update()
-Herbert's proposal to use a convoluted DMA mapping scheme

Another example: dynamic memory allocation might be needed beyond what's
available in the request context, i.e. the driver might not have a priori all the
information needed to inform the tfm about required memory using
crypto_ahash_set_reqsize().
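
For context, that per-request size is declared once, at transform init time -
a sketch (the names and the 64-byte state size are made up for illustration):

#include <crypto/internal/hash.h>

struct my_hash_req_ctx {
        u8 hw_state[64];        /* assumed size, for illustration only */
};

static int my_hash_cra_init(struct crypto_tfm *tfm)
{
        /* tells the core how much request context to allocate per request */
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct my_hash_req_ctx));
        return 0;
}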

This happens due to the semantics of the crypto API, which allows the user to
initialize a request object and drop it without getting a result (final or
partial hash).
I don't see what the use case below is good for, maybe just for benchmarking:
req = ahash_request_alloc();
[...]
crypto_ahash_init(req);
crypto_ahash_update(req);
ahash_request_free(req);

Horia



[PATCH v2] crypto: doc - clarify hash callbacks state machine

2018-03-20 Thread Horia Geantă
Add a note that it is perfectly legal to "abandon" a request object:
- call .init() and then (as many times) .update()
- _not_ call any of .final(), .finup() or .export() at any point in
  future

Link: https://lkml.kernel.org/r/20180222114741.ga27...@gondor.apana.org.au
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 Documentation/crypto/devel-algos.rst | 8 
 1 file changed, 8 insertions(+)

diff --git a/Documentation/crypto/devel-algos.rst 
b/Documentation/crypto/devel-algos.rst
index 66f50d32dcec..c45c6f400dbd 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -236,6 +236,14 @@ when used from another part of the kernel.
|
'---> HASH2
 
+Note that it is perfectly legal to "abandon" a request object:
+- call .init() and then (as many times) .update()
+- _not_ call any of .final(), .finup() or .export() at any point in future
+
+In other words implementations should mind the resource allocation and 
clean-up.
+No resources related to request objects should remain allocated after a call
+to .init() or .update(), since there might be no chance to free them.
+
 
 Specifics Of Asynchronous HASH Transformation
 ~
-- 
2.16.2



Re: [PATCH] crypto: doc - clarify hash callbacks state machine

2018-03-19 Thread Horia Geantă
On 3/19/2018 11:25 AM, Herbert Xu wrote:
> On Mon, Mar 19, 2018 at 06:39:50AM +0000, Horia Geantă wrote:
>>
>> The fact that there can be multiple requests in parallel (for a given tfm) 
>> is a
>> different topic.
>> Each request object has its state in its own state machine, independent from 
>> the
>> other request objects.
>> I assume this is clear enough.
> 
> My point is that all of the state associated with a request needs
> to be stored in the request object.  If you're start storing things
> in the driver/hardware, then things will get ugly one way or another.
> 
Agree, the request state should be stored in the request object; I am not
debating that.

Still there are limitations even when keeping state in the request object.
For example, an implementation cannot DMA map a buffer for the entire lifetime of a
request object, because this lifetime is unknown - the user can "abandon" the object
after a few .update() calls, or even after .init(). By "abandon" I mean not call
_ever_ any of .final(), .finup() or .export() on the object.

The only solution to avoid leaks in this case is to repeatedly DMA map & unmap
the buffer.
IOW, if one wants to load/save HW state in a buffer after an .update() and to
instruct the crypto engine to do this operation, the following steps are 
involved:
-gpp: DMA map the buffer, get its IOVA
-gpp: program the crypto engine with IOVA, wait for crypto engine's signal
-crypto engine: load HW state from buffer, perform the partial hash, save HW
state in buffer, signal gpp
-gpp: DMA unmap the buffer

I'd say this is pretty inefficient, yet I don't see an alternative.
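
Roughly (a sketch; caam_run_partial_hash() stands in for the real descriptor
handling and is hypothetical):

#include <linux/dma-mapping.h>

int caam_run_partial_hash(struct device *dev, dma_addr_t state); /* hypothetical */

static int hash_one_update(struct device *dev, void *hw_state, size_t len)
{
        dma_addr_t iova;
        int ret;

        /* map the state buffer so the engine can load and store it */
        iova = dma_map_single(dev, hw_state, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, iova))
                return -ENOMEM;

        /* program the engine with the IOVA and wait for its signal */
        ret = caam_run_partial_hash(dev, iova);

        /* unmap right away - the request may never be finalized */
        dma_unmap_single(dev, iova, len, DMA_BIDIRECTIONAL);

        return ret;
}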

Good or bad, the documentation should reflect this limitation - hence this 
patch.

Thanks,
Horia


Re: [PATCH] crypto: doc - clarify hash callbacks state machine

2018-03-19 Thread Horia Geantă
On 3/16/2018 5:16 PM, Herbert Xu wrote:
> On Mon, Mar 05, 2018 at 12:39:45PM +0200, Horia Geantă wrote:
>> Even though it doesn't make too much sense, it is perfectly legal to:
>> - call .init() and then (as many times) .update()
>> - subseqently _not_ call any of .final(), .finup() or .export()
> 
> Actually it makes perfect sense, because there can be an arbitrary
> number of requests for a given tfm.  There is no requirement that
> you must finalise the first request before submitting new ones.
> 
> IOW there can be an arbitrary number of outstanding requests even
> without the user intentionally abandoning any hash request.
> 
The fact that there can be multiple requests in parallel (for a given tfm) is a
different topic.
Each request object has its state in its own state machine, independent from the
other request objects.
I assume this is clear enough.

What I wanted to underline is that "abandoning" a hash request is allowed (even
though doing this is at least questionable), thus implementations must take
special care not to leak resources in this case.

If you think the commit message should be updated, then probably so should the
documentation update.

Thanks,
Horia


Re: [PATCH] crypto: talitos - fix IPsec cipher in length

2018-03-16 Thread Horia Geantă
On 3/16/2018 2:42 PM, Christophe LEROY wrote:
> On 16/03/2018 at 09:48, Horia Geantă wrote:
>> For SEC 2.x+, cipher in length must contain only the ciphertext length.
>> In case of using hardware ICV checking, the ICV length is provided via
>> the "extent" field of the descriptor pointer.
>>
>> Cc: <sta...@vger.kernel.org> # 4.8+
>> Fixes: 549bd8bc5987 ("crypto: talitos - Implement AEAD for SEC1 using 
>> HMAC_SNOOP_NO_AFEU")
> 
> It looks like the issue comes more from commit fbb22137c4d9b ("crypto: 
> talitos - fix use of sg_link_tbl_len"), doesn't it ?
> 
No, the first commit that breaks IPsec for SEC 2.x+ is the one I mentioned.

Afterwards, the refactoring of helper functions led to the current situation where
talitos_sg_map() is fed with "len" parameter that is used for two things:
-HW S/G table entries generation
-setting talitos pointer length

But in certain cases (like pointer nr. 4 - cipher in - for SEC 2.x+ IPsec),
talitos pointer length is only part of the total length, the other part being
set in the "extent" pointer field.

Currently talitos_sg_map() does not accommodate this case.
In order to keep the fix to a minimum, I've overwritten the talitos pointer length:
-first, talitos_sg_map() sets the length to sg_link_tbl_len = cryptlen + authsize
-in case of SEC 2.x IPsec, the length is then corrected to cryptlen (while
extent = authsize)

Regards,
Horia


[PATCH] crypto: talitos - fix IPsec cipher in length

2018-03-16 Thread Horia Geantă
For SEC 2.x+, cipher in length must contain only the ciphertext length.
In case of using hardware ICV checking, the ICV length is provided via
the "extent" field of the descriptor pointer.

Cc: <sta...@vger.kernel.org> # 4.8+
Fixes: 549bd8bc5987 ("crypto: talitos - Implement AEAD for SEC1 using 
HMAC_SNOOP_NO_AFEU")
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/talitos.c | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 447cb8b1b16a..61a30704847f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1251,6 +1251,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct 
aead_request *areq,
ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
 &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
 
+   /*
+* In case of SEC 2.x+, cipher in len must include only the ciphertext,
+* while extent is used for ICV len.
+*/
+   if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+   desc->ptr[4].len = cpu_to_be16(cryptlen);
+
if (ret > 1) {
tbl_off += ret;
sync_needed = true;
-- 
2.16.2



Re: [PATCH v11 6/7] crypto: caam: cleanup CONFIG_64BIT ifdefs when using io{read|write}64

2018-03-06 Thread Horia Geantă
On 3/5/2018 9:08 PM, Logan Gunthorpe wrote:
> Clean up the extra ifdefs which defined the wr_reg64 and rd_reg64
> functions in non-64bit cases in favour of the new common
> io-64-nonatomic-lo-hi header.
> 
> To be consistent with CAAM engine HW spec: in case of 64-bit registers,
> irrespective of device endianness, the lower address should be read from
> / written to first, followed by the upper address. Indeed the I/O
> accessors in CAAM driver currently don't follow the spec, however this
> is a good opportunity to fix the code.
> 
> Signed-off-by: Logan Gunthorpe <log...@deltatee.com>
> Cc: Andy Shevchenko <andy.shevche...@gmail.com>
> Cc: Horia Geantă <horia.gea...@nxp.com>
> Cc: Dan Douglass <dan.dougl...@nxp.com>
> Cc: Herbert Xu <herb...@gondor.apana.org.au>
> Cc: "David S. Miller" <da...@davemloft.net>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Please get used to carrying Reviewed-bys from previous iterations when patches
stay the same:
https://lkml.kernel.org/r/vi1pr0402mb334282217bc0ae1f8da8bfc398...@vi1pr0402mb3342.eurprd04.prod.outlook.com

Thanks,
Horia



[PATCH] crypto: doc - clarify hash callbacks state machine

2018-03-05 Thread Horia Geantă
Even though it doesn't make too much sense, it is perfectly legal to:
- call .init() and then (as many times) .update()
- subsequently _not_ call any of .final(), .finup() or .export()

Update the documentation, since this is an important issue to consider
from a resource management perspective.

Link: https://lkml.kernel.org/r/20180222114741.ga27...@gondor.apana.org.au
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 Documentation/crypto/devel-algos.rst | 8 
 1 file changed, 8 insertions(+)

diff --git a/Documentation/crypto/devel-algos.rst 
b/Documentation/crypto/devel-algos.rst
index 66f50d32dcec..0f4617019227 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -236,6 +236,14 @@ when used from another part of the kernel.
|
'---> HASH2
 
+Note that it is perfectly legal to:
+- call .init() and then (as many times) .update()
+- subsequently _not_ call any of .final(), .finup() or .export()
+
+In other words mind the resource allocation and clean-up,
+since this basically means no resources can remain allocated
+after a call to .init() or .update().
+
 
 Specifics Of Asynchronous HASH Transformation
 ~
-- 
2.16.2



Re: [PATCH 1/2] crypto: talitos - don't persistently map req_ctx->hw_context and req_ctx->buf

2018-03-02 Thread Horia Geantă
On 2/26/2018 6:40 PM, Christophe Leroy wrote:
> Commit 49f9783b0cea ("crypto: talitos - do hw_context DMA mapping
> outside the requests") introduced a persistent dma mapping of
> req_ctx->hw_context
> Commit 37b5e8897eb5 ("crypto: talitos - chain in buffered data for ahash
> on SEC1") introduced a persistent dma mapping of req_ctx->buf
> 
> As there is no destructor for req_ctx (the request context), the
> associated dma handlers were set in ctx (the tfm context). This is
> wrong as several hash operations can run with the same ctx.
> 
> This patch removes this persistent mapping.
> 
> Reported-by: Horia Geanta <horia.gea...@nxp.com>
> Fixes: 49f9783b0cea ("crypto: talitos - do hw_context DMA mapping outside the 
> requests")
> Fixes: 37b5e8897eb5 ("crypto: talitos - chain in buffered data for ahash on 
> SEC1")
> Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
Tested-by: Horia Geantă <horia.gea...@nxp.com>

Please add this to 4.15.y -stable tree.

Thanks,
Horia


Re: [PATCH 16/18] crypto: talitos - do hw_context DMA mapping outside the requests

2018-02-22 Thread Horia Geantă
On 2/22/2018 1:47 PM, Herbert Xu wrote:
> On Tue, Feb 20, 2018 at 11:32:25AM +0000, Horia Geantă wrote:
>>
>> If final/finup is optional, how is the final hash supposed to be retrieved?
> 
> Sometimes the computation ends with a partial hash, that's what
> export is for.  Also it is completely legal to abandon the hash
> state entirely.
> 
Thanks for the explanation.
It's unintuitive to call .init() -> .update() and then not to call any of
.final(), .finup(), .export().

Christophe,

IIUC this means that there is no room for improvement.
This patch needs to be reverted, to restore previous behaviour when the
hw_context was mapped / unmapped for every request.

Thanks,
Horia


Re: [PATCH 4.14, 4.9] crypto: talitos - fix Kernel Oops on hashing an empty file

2018-02-22 Thread Horia Geantă
On 2/22/2018 9:08 AM, Christophe Leroy wrote:
> Upstream 87a81dce53b1ea61acaeefa5191a0376a2d1d721
> 
> Performing the hash of an empty file leads to a kernel Oops
> 
> [   44.504600] Unable to handle kernel paging request for data at address 
> 0x000c
> [   44.512819] Faulting instruction address: 0xc02d2be8
> [   44.524088] Oops: Kernel access of bad area, sig: 11 [#1]
> [   44.529171] BE PREEMPT CMPC885
> [   44.532232] CPU: 0 PID: 491 Comm: md5sum Not tainted 
> 4.15.0-rc8-00211-g3a968610b6ea #81
> [   44.540814] NIP:  c02d2be8 LR: c02d2984 CTR: 
> [   44.545812] REGS: c6813c90 TRAP: 0300   Not tainted  
> (4.15.0-rc8-00211-g3a968610b6ea)
> [   44.554223] MSR:  9032   CR: 48222822  XER: 2000
> [   44.560855] DAR: 000c DSISR: c000
> [   44.560855] GPR00: c02d28fc c6813d40 c6828000 c646fa40 0001 0001 
> 0001 
> [   44.560855] GPR08: 004c  c000bfcc  28222822 100280d4 
>  10020008
> [   44.560855] GPR16:  0020   10024008  
> c646f9f0 c6179a10
> [   44.560855] GPR24:  0001 c62f0018 c6179a10  c6367a30 
> c62f c646f9c0
> [   44.598542] NIP [c02d2be8] ahash_process_req+0x448/0x700
> [   44.603751] LR [c02d2984] ahash_process_req+0x1e4/0x700
> [   44.608868] Call Trace:
> [   44.611329] [c6813d40] [c02d28fc] ahash_process_req+0x15c/0x700 
> (unreliable)
> [   44.618302] [c6813d90] [c02060c4] hash_recvmsg+0x11c/0x210
> [   44.623716] [c6813db0] [c0331354] ___sys_recvmsg+0x98/0x138
> [   44.629226] [c6813eb0] [c03332c0] __sys_recvmsg+0x40/0x84
> [   44.634562] [c6813f10] [c03336c0] SyS_socketcall+0xb8/0x1d4
> [   44.640073] [c6813f40] [c000d1ac] ret_from_syscall+0x0/0x38
> [   44.645530] Instruction dump:
> [   44.648465] 38c1 7f63db78 4e800421 7c791b78 54690ffe 0f09 80ff0190 
> 2f87
> [   44.656122] 40befe50 2f990001 409e0210 813f01bc <8129000c> b39e003a 
> 7d29c214 913e003c
> 
> This patch fixes that Oops by checking if src is NULL.
> 
> Fixes: 6a1e8d14156d4 ("crypto: talitos - making mapping helpers more generic")
> Cc: 
> Signed-off-by: Christophe Leroy 

Isn't this needed also in 4.15.y?

Thanks,
Horia


Re: [PATCH 16/18] crypto: talitos - do hw_context DMA mapping outside the requests

2018-02-20 Thread Horia Geantă
On 2/20/2018 12:34 PM, Herbert Xu wrote:
> On Mon, Feb 19, 2018 at 01:16:30PM +0000, Horia Geantă wrote:
>>
>>> And what about ALGIF path from user space ?
>>> What if the user never calls the last sendmsg() which will call 
>>> hash_finup() ?
>>>
>> User is expected to follow the rules of the crypto API.
>> Of course, kernel won't (or at least shouldn't) crash in case of misuse.
>> However, in these cases some resources might not be freed - it's unavoidable.
> 
> the crypto API does not require the presence of a finalisation.
> It is entirely optional.  So leaving resources pinned down until
> final/finup occurs is unacceptable, both from user-space and the
> kernel.
> 
If final/finup is optional, how is the final hash supposed to be retrieved?

According to documentation, these are the accepted flows (with the option to
export/import a partial hash b/w update and final/finup):

.init() -> .update() -> .final()
            ^    |         |
            '----'         '---> HASH

.init() -> .update() -> .finup()
            ^    |         |
            '----'         '---> HASH

           .digest()
               |
               '---> HASH

Note that digest() is not an issue in the case we are discussing, since resource
allocation happens only in init().

Thanks,
Horia


Re: [PATCH 16/18] crypto: talitos - do hw_context DMA mapping outside the requests

2018-02-19 Thread Horia Geantă
On 2/19/2018 11:14 AM, Christophe LEROY wrote:
> On 19/02/2018 at 09:30, Horia Geantă wrote:
>> On 2/19/2018 9:58 AM, Christophe LEROY wrote:
>>> On 18/02/2018 at 18:14, Horia Geantă wrote:
>>>> There is no ahash_exit() callback mirroring ahash_init().
>>>>
>>>> The clean-up of request ctx should be done in the last states of the hash 
>>>> flows
>>>> described here:
>>>> https://www.kernel.org/doc/html/latest/crypto/devel-algos.html#cipher-definition-with-struct-shash-alg-and-ahash-alg
>>>> for e.g. in the final() callback.
>>>
>>> Unfortunately it seems that we can't rely on those finalising functions
>>> being called all the time.
>>> If you look into test_ahash_jiffies() for instance, in case of error the
>>> call of crypto_hash_final() is skipped.
>>
>> If test_ahash_jiffies() errors before calling crypto_ahash_final(req), this
>> means a previous callback failed.
>> Accordingly, DMA unmapping should be performed also on the corresponding 
>> errors
>> paths in the driver.
>>
> 
> And what about ALGIF path from user space ?
> What if the user never calls the last sendmsg() which will call 
> hash_finup() ?
> 
User is expected to follow the rules of the crypto API.
Of course, kernel won't (or at least shouldn't) crash in case of misuse.
However, in these cases some resources might not be freed - it's unavoidable.

Horia


Re: [PATCH 16/18] crypto: talitos - do hw_context DMA mapping outside the requests

2018-02-19 Thread Horia Geantă
On 2/19/2018 9:58 AM, Christophe LEROY wrote:
> On 18/02/2018 at 18:14, Horia Geantă wrote:
>> There is no ahash_exit() callback mirroring ahash_init().
>>
>> The clean-up of request ctx should be done in the last states of the hash 
>> flows
>> described here:
>> https://www.kernel.org/doc/html/latest/crypto/devel-algos.html#cipher-definition-with-struct-shash-alg-and-ahash-alg
>> for e.g. in the final() callback.
> 
> Unfortunately it seems that we can't rely on those finalising functions
> being called all the time.
> If you look into test_ahash_jiffies() for instance, in case of error the 
> call of crypto_hash_final() is skipped.

If test_ahash_jiffies() errors before calling crypto_ahash_final(req), this
means a previous callback failed.
Accordingly, DMA unmapping should be performed also on the corresponding errors
paths in the driver.

Horia


Re: [PATCH 16/18] crypto: talitos - do hw_context DMA mapping outside the requests

2018-02-18 Thread Horia Geantă
On 2/17/2018 6:32 PM, Christophe LEROY wrote:
> 
> 
> On 07/02/2018 at 15:39, Horia Geantă wrote:
>> On 10/6/2017 4:06 PM, Christophe Leroy wrote:
>>> At every request, we map and unmap the same hash hw_context.
>>>
>>> This patch moves the dma mapping/unmapping in functions ahash_init()
>>> and ahash_import().
>>>
>>> Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
>>> ---
>>>   drivers/crypto/talitos.c | 80 
>>> ++--
>>>   1 file changed, 57 insertions(+), 23 deletions(-)
>>>
>>> diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
>>> index ebfd6d982ed6..d495649d5267 100644
>>> --- a/drivers/crypto/talitos.c
>>> +++ b/drivers/crypto/talitos.c
>>> @@ -819,6 +819,7 @@ struct talitos_ctx {
>>> unsigned int keylen;
>>> unsigned int enckeylen;
>>> unsigned int authkeylen;
>>> +   dma_addr_t dma_hw_context;
>> This doesn't look correct.
>>
>> talitos_ctx structure is the tfm context.
>> dma_hw_context is the IOVA of hw_context, located in talitos_ahash_req_ctx
>> structure (request context).
> 
> Yes but I have not found how I can know that the request context is
> being released in order to unmap() dma at that time.
> It is tricky to use the tfm context I agree, but at least I know when
> the tfm context gets destroyed, i.e. in talitos_cra_exit_ahash()
> The request context is created by ahash_request_alloc() and released by
> ahash_request_free(). I have not found the way to call dma_unmap() 
> before ahash_request_free() gets called.
> 
>>
>> If there are multiple requests in flight for the same tfm, dma_hw_context 
>> will
>> be overwritten.
> 
> Before overwritting dma_hw_context, it is always released, see 
> talitos_cra_exit_ahash(), ahash_init(), ahash_import()
> 
The problem is not the unmapping.
If there are two requests for the same tfm, then given the following sequence
1. tfm->ahash_init(req1)
tfm_ctx->dma_hw_context points to req1_ctx->hw_context
2. tfm->ahash_init(req2)
tfm_ctx->dma_hw_context [unmapped, then] points to req2_ctx->hw_context
i.e. req1 will use the hw_context of req2.

>>
>> dma_hw_context needs to be moved in request context (talitos_ahash_req_ctx 
>> struct).
> 
> Any suggestion then on how to handle the issue explained above ?
> 
There is no ahash_exit() callback mirroring ahash_init().

The clean-up of request ctx should be done in the last states of the hash flows
described here:
https://www.kernel.org/doc/html/latest/crypto/devel-algos.html#cipher-definition-with-struct-shash-alg-and-ahash-alg
for e.g. in the final() callback.
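
For illustration only, a rough sketch (not tested; field and helper names such as
xxx_ahash_req_ctx, req_ctx->dev and xxx_do_final_hash() are made up) of what such
clean-up in the finalising callback could look like, assuming the DMA handle were
kept in the request context instead of the tfm context:

	static int xxx_ahash_final(struct ahash_request *req)
	{
		struct xxx_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
		struct device *dev = req_ctx->dev;	/* assumed field */
		int ret;

		ret = xxx_do_final_hash(req);		/* assumed helper */

		/* last state of the flow: release the mapping taken in .init() */
		dma_unmap_single(dev, req_ctx->dma_hw_context,
				 sizeof(req_ctx->hw_context), DMA_BIDIRECTIONAL);

		return ret;
	}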

Hope this helps,
Horia


Re: [PATCH 2/2] crypto: caam: Use common error handling code in four functions

2018-02-15 Thread Horia Geantă
On 2/14/2018 8:32 PM, SF Markus Elfring wrote:
> From: Markus Elfring 
> Date: Wed, 14 Feb 2018 19:14:49 +0100
> 
> Add jump targets so that a bit of exception handling can be better reused
> at the end of these functions.
> 
> Signed-off-by: Markus Elfring 
[snip]
> @@ -1096,6 +1092,7 @@ static int ahash_digest(struct ahash_request *req)
>   if (!ret) {
>   ret = -EINPROGRESS;
>   } else {
> +unmap_hash:
>   ahash_unmap(jrdev, edesc, req, digestsize);
>   kfree(edesc);
>   }
> 
I understand jumps are a necessary evil for dealing with shortcomings of C,
however please avoid jumping into an if/else branch.

Code could be rewritten as:

if (!ret)
return -EINPROGRESS;

unmap_hash:
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);

Thanks,
Horia


Re: [PATCH 1/2] crypto: caam: Delete an error message for a failed memory allocation in seven functions

2018-02-14 Thread Horia Geantă
On 2/14/2018 8:31 PM, SF Markus Elfring wrote:
> From: Markus Elfring <elfr...@users.sourceforge.net>
> Date: Wed, 14 Feb 2018 18:22:38 +0100
> 
> Omit an extra message for a memory allocation failure in these functions.
> 
> This issue was detected by using the Coccinelle software.
> 
> Signed-off-by: Markus Elfring <elfr...@users.sourceforge.net>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

though please consider the following

> @@ -689,10 +687,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct 
> caam_hash_ctx *ctx,
>   unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
>  
>   edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
> - if (!edesc) {
> - dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
> + if (!edesc)
>   return NULL;
> - }
>  
With this change, the ctx parameter is no longer used in ahash_edesc_alloc().
Either here or in a different patch, the function should be updated.

Thanks,
Horia


Re: [PATCH 16/18] crypto: talitos - do hw_context DMA mapping outside the requests

2018-02-07 Thread Horia Geantă
On 10/6/2017 4:06 PM, Christophe Leroy wrote:
> At every request, we map and unmap the same hash hw_context.
> 
> This patch moves the dma mapping/unmapping in functions ahash_init()
> and ahash_import().
> 
> Signed-off-by: Christophe Leroy 
> ---
>  drivers/crypto/talitos.c | 80 
> ++--
>  1 file changed, 57 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
> index ebfd6d982ed6..d495649d5267 100644
> --- a/drivers/crypto/talitos.c
> +++ b/drivers/crypto/talitos.c
> @@ -819,6 +819,7 @@ struct talitos_ctx {
>   unsigned int keylen;
>   unsigned int enckeylen;
>   unsigned int authkeylen;
> + dma_addr_t dma_hw_context;
This doesn't look correct.

talitos_ctx structure is the tfm context.
dma_hw_context is the IOVA of hw_context, located in talitos_ahash_req_ctx
structure (request context).

If there are multiple requests in flight for the same tfm, dma_hw_context will
be overwritten.

dma_hw_context needs to be moved in request context (talitos_ahash_req_ctx 
struct).

Thanks,
Horia


[PATCH] crypto: caam - fix endless loop when DECO acquire fails

2018-02-05 Thread Horia Geantă
In case DECO0 cannot be acquired - i.e. run_descriptor_deco0() fails
with -ENODEV, caam_probe() enters an endless loop:

run_descriptor_deco0
    ret -ENODEV
      -> instantiate_rng
           -ENODEV, overwritten by -EAGAIN
           ret -EAGAIN
             -> caam_probe
                  -EAGAIN results in endless loop

It turns out the error path in instantiate_rng() is incorrect,
the checks are done in the wrong order.

Cc: <sta...@vger.kernel.org> # 3.13+
Fixes: 1005bccd7a4a6 ("crypto: caam - enable instantiation of all RNG4 state 
handles")
Reported-by: Bryan O'Donoghue <pure.lo...@nexus-software.ie>
Suggested-by: Auer Lukas <lukas.a...@aisec.fraunhofer.de>
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/ctrl.c | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 75d280cb2dc0..e843cf410373 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int 
state_handle_mask,
 * without any error (HW optimizations for later
 * CAAM eras), then try again.
 */
+   if (ret)
+   break;
+
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
-   !(rdsta_val & (1 << sh_idx)))
+   !(rdsta_val & (1 << sh_idx))) {
ret = -EAGAIN;
-   if (ret)
break;
+   }
+
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
-- 
2.12.0.264.gd6db3f216544



Re: [PATCH v3 2/5] crypto: caam: Fix endless loop when RNG is already initialized

2018-02-05 Thread Horia Geantă
On 2/2/2018 2:54 PM, Auer, Lukas wrote:
> On Fri, 2018-02-02 at 11:20 +, Bryan O'Donoghue wrote:
>> On 01/02/18 12:16, Horia Geantă wrote:
>>> If the loop cannot exit based on value of "ret" != -EAGAIN, then it
>>> means
>>> caam_probe() will eventually fail due to ret == -EAGAIN:
>>> if (ret) {
>>> dev_err(dev, "failed to instantiate RNG");
>>> goto caam_remove;
>>> }
>>
>> For me it's an endless loop applying the first two
>>
>> https://patchwork.ozlabs.org/patch/866460/
>> https://patchwork.ozlabs.org/patch/866462/
>>
>> but not this one
>>
>> https://patchwork.ozlabs.org/patch/865890/
>>
[snip]
> 
> I think the problem lies in the instantiate_rng() function. If the
> driver is unable to acquire DEC0 it'll return -ENODEV. This should
> terminate the while loop in the probe function. However, the return
> value is never checked and is instead overwritten with -EAGAIN, causing
> the endless loop.
> 
> This problem only occurs if u-boot instantiates only one of the state
> handles (ent_delay doesn't get incremented) and the kernel runs in non-
> secure mode (DEC0 can't get acquired). Instantiating all state handles
> in u-boot therefore fixes this problem. In addition, the return value
> in instantiate_rng() should be handled correctly by including
> 
> if (ret)
>   break;
> 
> right after "ret = run_descriptor_deco0(ctrldev, desc, );".
> 
Indeed, the error path is incorrect and should be fixed as you mentioned.
I will send a patch replacing this one.
Note that this fixes only the error path, meaning caam_probe() won't go into an
endless loop and instead will return -ENODEV, due to being unable to acquire
control of DECO0.

There are still a few hurdles to cross for CAAM to work in a TZ environment.

For example, could you please check / confirm whether DECO0MIDR (the DECO0 MID
registers @0xA0, @0xA4) are set such that the Linux kernel is allowed to r/w
DECO0-related registers?

Thanks,
Horia


Re: [PATCH v3 2/5] crypto: caam: Fix endless loop when RNG is already initialized

2018-02-01 Thread Horia Geantă
On 1/31/2018 4:00 AM, Bryan O'Donoghue wrote:
> commit 1005bccd7a4a ("crypto: caam - enable instantiation of all RNG4 state
> handles") introduces a control when incrementing ent_delay which contains
> the following comment above it:
> 
> /*
>  * If either SH were instantiated by somebody else
>  * (e.g. u-boot) then it is assumed that the entropy
>  * parameters are properly set and thus the function
>  * setting these (kick_trng(...)) is skipped.
>  * Also, if a handle was instantiated, do not change
>  * the TRNG parameters.
>  */
> 
> This is a problem observed when sec_init() has been run in u-boot and
> and TrustZone is enabled. We can fix this by instantiating all rng state
> handles in u-boot but, on the Kernel side we should ensure that this
> non-terminating path is dealt with.
> 
> Fixes: 1005bccd7a4a ("crypto: caam - enable instantiation of all RNG4 state
> handles")
> 
> Reported-by: Ryan Harkin <ryan.har...@linaro.org>
> Cc: "Horia Geantă" <horia.gea...@nxp.com>
> Cc: Aymen Sghaier <aymen.sgha...@nxp.com>
> Cc: Fabio Estevam <fabio.este...@nxp.com>
> Cc: Peng Fan <peng@nxp.com>
> Cc: "David S. Miller" <da...@davemloft.net>
> Cc: Lukas Auer <lukas.a...@aisec.fraunhofer.de>
> Cc: <sta...@vger.kernel.org> # 4.12+
> Signed-off-by: Bryan O'Donoghue <pure.lo...@nexus-software.ie>
> ---
>  drivers/crypto/caam/ctrl.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
> index 98986d3..0a1e96b 100644
> --- a/drivers/crypto/caam/ctrl.c
> +++ b/drivers/crypto/caam/ctrl.c
> @@ -704,7 +704,10 @@ static int caam_probe(struct platform_device *pdev)
>ent_delay);
>   kick_trng(pdev, ent_delay);
>   ent_delay += 400;
> + } else if (ctrlpriv->rng4_sh_init && inst_handles) {
> + ent_delay += 400;
>   }
If both RNG state handles are initialized before the kernel runs, then
instantiate_rng() should be a no-op and return 0, which is enough to exit the
loop: while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX))

If the loop cannot exit based on value of "ret" != -EAGAIN, then it means
caam_probe() will eventually fail due to ret == -EAGAIN:
if (ret) {
dev_err(dev, "failed to instantiate RNG");
goto caam_remove;
}
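
For reference, a simplified hand-written sketch of the surrounding retry loop in
caam_probe() (comments and messages trimmed; not a verbatim copy of ctrl.c):

	int ret = 0;
	int ent_delay = RTSDCTL_ENT_DLY_MIN;

	do {
		/* kick_trng() is skipped when a state handle was already
		 * instantiated, e.g. by u-boot
		 */
		if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
			kick_trng(pdev, ent_delay);
			ent_delay += 400;
		}
		ret = instantiate_rng(dev, inst_handles, gen_sk);
	} while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);

	if (ret) {
		dev_err(dev, "failed to instantiate RNG");
		goto caam_remove;
	}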

Please provide more details, so that the root cause is found and fixed.
For example, what is the value of RDSTA (the RNG DRNG Status register @0x6C0):
-before & after u-boot initializes RNG
-as seen by kernel during caam_probe()
Also provide related error messages displayed during boot.

Thanks,
Horia


Re: [PATCH v3 5/5] ARM: dts: imx7s: add CAAM device node

2018-02-01 Thread Horia Geantă
On 1/31/2018 4:00 AM, Bryan O'Donoghue wrote:
> From: Rui Miguel Silva <rui.si...@linaro.org>
> 
> Add CAAM device node to the i.MX7s device tree.
> 
> Signed-off-by: Rui Miguel Silva <rui.si...@linaro.org>
> Cc: Shawn Guo <shawn...@kernel.org>
> Cc: Sascha Hauer <ker...@pengutronix.de>
> Cc: linux-arm-ker...@lists.infradead.org
> Cc: "Horia Geantă" <horia.gea...@nxp.com>
> Cc: Aymen Sghaier <aymen.sgha...@nxp.com>
> Cc: Fabio Estevam <fabio.este...@nxp.com>
> Cc: Peng Fan <peng@nxp.com>
> Cc: "David S. Miller" <da...@davemloft.net>
> Cc: Lukas Auer <lukas.a...@aisec.fraunhofer.de>
> Signed-off-by: Bryan O'Donoghue <pure.lo...@nexus-software.ie>
> ---
>  arch/arm/boot/dts/imx7s.dtsi | 31 +++
>  1 file changed, 31 insertions(+)
> 
> diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
> index 82ad26e..e38c159 100644
> --- a/arch/arm/boot/dts/imx7s.dtsi
> +++ b/arch/arm/boot/dts/imx7s.dtsi
> @@ -805,6 +805,37 @@
>   status = "disabled";
>   };
>  
> + crypto: caam@3090 {
> + compatible = "fsl,sec-v4.0";
> + fsl,sec-era = <4>;
CCBVID[CAAM_ERA] = 8.
Either remove this (optional) property and let the bootloader add it
dynamically, or provide a correct value for it.

Thanks,
Horia



[PATCH 2/3] crypto: caam - prepare for gcm(aes) support over QI interface

2018-01-29 Thread Horia Geantă
Update gcm(aes) descriptors (generic, rfc4106 and rfc4543) such that
they would also work when submitted via the QI interface.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg.c  |  19 +++--
 drivers/crypto/caam/caamalg_desc.c | 165 ++---
 drivers/crypto/caam/caamalg_desc.h |  24 --
 3 files changed, 183 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2188235be02d..584a6c183548 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -328,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -349,7 +350,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
}
 
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
+   cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
@@ -366,7 +367,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
}
 
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
+   cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
@@ -387,6 +388,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 {
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -408,7 +410,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
}
 
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
+   cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
@@ -425,7 +428,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
}
 
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
+   cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
@@ -447,6 +451,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 {
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -468,7 +473,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
}
 
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
+   cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), ctx->dir);
 
@@ -485,7 +491,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
}
 
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
+   cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), ctx->dir);
 
diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index ceb93fbb76e6..8ae7a1be7dfd 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -625,10 +625,13 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with 
OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_gcm_encap(u32 * co

[PATCH 3/3] crypto: caam/qi - add GCM support

2018-01-29 Thread Horia Geantă
Add support for AES working in Galois Counter Mode.
The following algorithms are added:
gcm(aes)
rfc4106(gcm(aes))
rfc4543(gcm(aes))

There is a limitation related to IV size, similar to the one present in
SW implementation (crypto/gcm.c):
The only IV size allowed is 12 bytes. It will be padded by HW to the right
with 0x0000_0001 (up to 16 bytes - AES block size), according to the
GCM specification.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
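As a plain illustration of that padding rule (independent of the driver code below),
the 12-byte IV ends up forming the 16-byte GCM pre-counter block J0 = IV || 0x0000_0001;
a minimal, self-contained sketch:

	#include <stdint.h>
	#include <string.h>

	/* Build the 16-byte GCM pre-counter block J0 from a 96-bit IV:
	 * J0 = IV || 0x00000001 - i.e. the right-padding the commit
	 * message says the hardware performs.
	 */
	static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[12])
	{
		memcpy(j0, iv, 12);
		j0[12] = 0x00;
		j0[13] = 0x00;
		j0[14] = 0x00;
		j0[15] = 0x01;
	}
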
 drivers/crypto/caam/caamalg_qi.c | 374 +++
 1 file changed, 374 insertions(+)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f4db39495d22..c2b5762d56a0 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -284,6 +284,309 @@ static int aead_setkey(struct crypto_aead *aead, const u8 
*key,
return -EINVAL;
 }
 
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(aead);
+   unsigned int ivsize = crypto_aead_ivsize(aead);
+   int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+   ctx->cdata.keylen;
+
+   if (!ctx->cdata.keylen || !ctx->authsize)
+   return 0;
+
+   /*
+* Job Descriptor and Shared Descriptor
+* must fit into the 64-word Descriptor h/w Buffer
+*/
+   if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+   ctx->cdata.key_inline = true;
+   ctx->cdata.key_virt = ctx->key;
+   } else {
+   ctx->cdata.key_inline = false;
+   ctx->cdata.key_dma = ctx->key_dma;
+   }
+
+   cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+   /*
+* Job Descriptor and Shared Descriptor
+* must fit into the 64-word Descriptor h/w Buffer
+*/
+   if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+   ctx->cdata.key_inline = true;
+   ctx->cdata.key_virt = ctx->key;
+   } else {
+   ctx->cdata.key_inline = false;
+   ctx->cdata.key_dma = ctx->key_dma;
+   }
+
+   cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+   return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+   ctx->authsize = authsize;
+   gcm_set_sh_desc(authenc);
+
+   return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(aead);
+   struct device *jrdev = ctx->jrdev;
+   int ret;
+
+#ifdef DEBUG
+   print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+   memcpy(ctx->key, key, keylen);
+   dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+   ctx->cdata.keylen = keylen;
+
+   ret = gcm_set_sh_desc(aead);
+   if (ret)
+   return ret;
+
+   /* Now update the driver contexts with the new shared descriptor */
+   if (ctx->drv_ctx[ENCRYPT]) {
+   ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+   if (ret) {
+   dev_err(jrdev, "driver enc context update failed\n");
+   return ret;
+   }
+   }
+
+   if (ctx->drv_ctx[DECRYPT]) {
+   ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+   if (ret) {
+   dev_err(jrdev, "driver dec context update failed\n");
+   return ret;
+   }
+   }
+
+   return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+   struct caam_ctx *ctx = crypto_aead_ctx(aead);
+   unsigned int ivsize = crypto_aead_ivsize(aead);
+   int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+   ctx->cdata.keylen;
+
+   if (!ctx->cdata.keylen || !ctx->authsize)
+   return 0;
+
+   ctx->cdata.key_virt = ctx->key;
+
+   /*
+* Job Descriptor and Shared Descriptor
+* must fit into the 64-word Descriptor h/w Buffer
+*/
+   if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+   ctx->cdata.key_inline = true;
+   } else {
+   ctx->cdata.key_inline = false;
+   ctx->cdata.key_dma = ctx->key_dma;
+   }
+
+   cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsiz

[PATCH 1/3] crypto: caam/qi - return -EBADMSG for ICV check failure

2018-01-29 Thread Horia Geantă
Crypto drivers are expected to return -EBADMSG in case of
ICV check (authentication) failure.

In this case it also makes sense to suppress the error message
in the QI dequeue callback.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 12 +++-
 drivers/crypto/caam/qi.c | 11 +--
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4aecc9435f69..f4db39495d22 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -562,8 +562,18 @@ static void aead_done(struct caam_drv_req *drv_req, u32 
status)
qidev = caam_ctx->qidev;
 
if (unlikely(status)) {
+   u32 ssrc = status & JRSTA_SSRC_MASK;
+   u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
caam_jr_strstatus(qidev, status);
-   ecode = -EIO;
+   /*
+* verify hw auth check passed else return -EBADMSG
+*/
+   if (ssrc == JRSTA_SSRC_CCB_ERROR &&
+   err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+   ecode = -EBADMSG;
+   else
+   ecode = -EIO;
}
 
edesc = container_of(drv_req, typeof(*edesc), drv_req);
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f9a44f485aac..b9480828da38 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -579,8 +579,15 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct 
qman_portal *p,
 
fd = &dqrr->fd;
status = be32_to_cpu(fd->status);
-   if (unlikely(status))
-   dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
+   if (unlikely(status)) {
+   u32 ssrc = status & JRSTA_SSRC_MASK;
+   u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+   if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+   err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+   dev_err(qidev, "Error: %#x in CAAM response FD\n",
+   status);
+   }
 
if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
dev_err(qidev, "Non-compound FD from CAAM\n");
-- 
2.12.0.264.gd6db3f216544



Re: [RESEND PATCH 0/6] Enable CAAM on i.MX7s fix TrustZone issues

2018-01-25 Thread Horia Geantă
On 1/24/2018 4:50 PM, Bryan O'Donoghue wrote:
> This patch-set enables CAAM on the i.MX7s and fixes a number of issues
> identified with the CAAM driver and hardware when TrustZone mode is
> enabled.
> 
> The first block of patches are simple bug-fixes, followed by a second block
> of patches which are simple enabling patches for the i.MX7Solo - note we
> aren't enabling for the i.MX7Dual since we don't have hardware to test that
> out but it should be a 1:1 mapping for others to enable when appropriate.
> 
> The final block in this series implements a fix for using the CAAM when
> OPTEE/TrustZone is enabled. The various details are logged in these
> threads.
> 
> Link: https://github.com/OP-TEE/optee_os/issues/1408
> Link: https://tinyurl.com/yam5gv9a
> Link: https://patchwork.ozlabs.org/cover/865042
> 
> In simple terms, when TrustZone is active the first page of the CAAM
> becomes inaccessible to Linux as it has a special 'TZ bit' associated with
> it that software cannot toggle or even view AFAIK.
If the first ("global") caam register page is not accessible, RNG init is not
the only problem. For example, device endianness detection won't work. A complete
list could be generated by auditing all places where this page is r/w.

IMHO the correct direction for solving such cases (i.e. the Linux kernel is provided
with access only to a few job rings) is to split the driver into two independent
ones - a controller driver and a job ring driver - and have corresponding DT nodes
for them. Controller DT node and one or more of the job ring DT nodes would be
deleted by the boot loader / trusted firmware if needed.
Of course, the job ring DT node might need additional properties for the driver
to work.

Thanks,
Horia

> 
> The patches here then
> 
> 1. Detect when TrustZone is active
> 2. Detect if u-boot (or OPTEE) has already initialized the RNG
> 
> and loads the CAAM driver in a different way - skipping over the RNG
> initialization that Linux now no-longer has permissions to carry out.
> 
> Should #1 be true but #2 not be true, driver loading stops (and Rui's patch
> for the NULL pointer dereference fixes a crash on this path). If #2 is true
> but #1 is not then it's a NOP as Linux has full permission to rewrite the
> deco registers in the first page of CAAM registers.
> 
> Finally then if #1 and #2 are true, the fixes here allow the CAAM to come
> up and for the RNG to be useable again.
> 
> Bryan O'Donoghue (3):
>   crypto: caam: Fix endless loop when RNG is already initialized
>   crypto: caam: add logic to detect when running under TrustZone
>   crypto: caam: detect RNG init when TrustZone is active
> 
> Rui Miguel Silva (3):
>   crypto: caam: Fix null dereference at error path
>   ARM: dts: imx7s: add CAAM device node
>   imx7d: add CAAM clocks
> 
>  arch/arm/boot/dts/imx7s.dtsi| 26 +++
>  drivers/clk/imx/clk-imx7d.c |  3 +++
>  drivers/crypto/caam/ctrl.c  | 45 
> ++---
>  drivers/crypto/caam/intern.h|  1 +
>  include/dt-bindings/clock/imx7d-clock.h |  5 +++-
>  5 files changed, 76 insertions(+), 4 deletions(-)
> 


Re: [RESEND PATCH 1/6] crypto: caam: Fix null dereference at error path

2018-01-25 Thread Horia Geantă
On 1/24/2018 4:50 PM, Bryan O'Donoghue wrote:
> From: Rui Miguel Silva <rui.si...@linaro.org>
> 
> caam_remove already removes the debugfs entry, so we need to remove the one
> immediately before calling caam_remove.
> 
> This fixes a NULL dereference in the error paths when caam_probe fails.
> 
> [bod: changed name prefix to "crypto: caam: Fix .."]
> [bod: added Fixes tag]
> 
> Fixes: 67c2315def06 ("crypto: caam - add Queue Interface (QI) backend
> support")
Cc: <sta...@vger.kernel.org> # 4.12+

> 
> Tested-by: Ryan Harkin <ryan.har...@linaro.org>
> Signed-off-by: Rui Miguel Silva <rui.si...@linaro.org>
> Cc: "Horia Geantă" <horia.gea...@nxp.com>
> Cc: Aymen Sghaier <aymen.sgha...@nxp.com>
> Cc: Fabio Estevam <fabio.este...@nxp.com>
> Cc: Peng Fan <peng@nxp.com>
> Cc: Herbert Xu <herb...@gondor.apana.org.au>
> Cc: "David S. Miller" <da...@davemloft.net>
> Cc: Lukas Auer <lukas.a...@aisec.fraunhofer.de>
> Signed-off-by: Bryan O'Donoghue <pure.lo...@nexus-software.ie>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Thanks,
Horia


Re: [PATCH v10 7/8] crypto: caam: cleanup CONFIG_64BIT ifdefs when using io{read|write}64

2018-01-05 Thread Horia Geantă
On 1/4/2018 9:16 PM, Logan Gunthorpe wrote:
> Clean up the extra ifdefs which defined the wr_reg64 and rd_reg64
> functions in non-64bit cases in favour of the new common
> io-64-nonatomic-lo-hi header.
> 
> To be consistent with CAAM engine HW spec: in case of 64-bit registers,
> irrespective of device endianness, the lower address should be read from
> / written to first, followed by the upper address. Indeed the I/O
> accessors in CAAM driver currently don't follow the spec, however this
> is a good opportunity to fix the code.
> 
> Signed-off-by: Logan Gunthorpe <log...@deltatee.com>
> Cc: Andy Shevchenko <andy.shevche...@gmail.com>
> Cc: Horia Geantă <horia.gea...@nxp.com>
> Cc: Dan Douglass <dan.dougl...@nxp.com>
> Cc: Herbert Xu <herb...@gondor.apana.org.au>
> Cc: "David S. Miller" <da...@davemloft.net>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Thanks,
Horia



Re: [PATCH v9 7/8] crypto: caam: cleanup CONFIG_64BIT ifdefs when using io{read|write}64

2018-01-03 Thread Horia Geantă
On 1/3/2018 8:07 PM, Logan Gunthorpe wrote:
> Clean up the extra ifdefs which defined the wr_reg64 and rd_reg64
> functions in non-64bit cases in favour of the new common
> io-64-nonatomic-lo-hi header.
> 
> Signed-off-by: Logan Gunthorpe <log...@deltatee.com>
> Cc: Andy Shevchenko <andy.shevche...@gmail.com>
> Cc: Horia Geantă <horia.gea...@nxp.com>
> Cc: Dan Douglass <dan.dougl...@nxp.com>
> Cc: Herbert Xu <herb...@gondor.apana.org.au>
> Cc: "David S. Miller" <da...@davemloft.net>
> ---
>  drivers/crypto/caam/regs.h | 26 +-
>  1 file changed, 1 insertion(+), 25 deletions(-)
> 
> diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
> index fee363865d88..ec6528e5ce9d 100644
> --- a/drivers/crypto/caam/regs.h
> +++ b/drivers/crypto/caam/regs.h
> @@ -10,7 +10,7 @@
>  
>  #include 
>  #include 
> -#include 
> +#include 
Typo: lo-hi should be used instead (see previous patch versions).

Please add in the commit message the explanation (which was there in v8 but
removed in v9):
To be consistent with CAAM engine HW spec: in case of 64-bit registers,
irrespective of device endianness, the lower address should be read from
/ written to first, followed by the upper address. Indeed the I/O
accessors in CAAM driver currently don't follow the spec, however this
is a good opportunity to fix the code.

>  
>  /*
>   * Architecture-specific register access methods
> @@ -136,7 +136,6 @@ static inline void clrsetbits_32(void __iomem *reg, u32 
> clear, u32 set)
>   *base + 0x : least-significant 32 bits
>   *base + 0x0004 : most-significant 32 bits
>   */
> -#ifdef CONFIG_64BIT
>  static inline void wr_reg64(void __iomem *reg, u64 data)
>  {
>   if (caam_little_end)
Since the 2 cases (32/64-bit) are merged, caam_imx should be accounted for so that
the logic stays the same.

This means for e.g. for wr_reg64 (similar for rd_reg64):
static inline void wr_reg64(void __iomem *reg, u64 data)
{
if (!caam_imx && caam_little_end)
iowrite64(data, reg);
else
iowrite64be(data, reg);
}

Thanks,
Horia

> @@ -153,29 +152,6 @@ static inline u64 rd_reg64(void __iomem *reg)
>   return ioread64be(reg);
>  }
>  
> -#else /* CONFIG_64BIT */
> -static inline void wr_reg64(void __iomem *reg, u64 data)
> -{
> - if (!caam_imx && caam_little_end) {
> - wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
> - wr_reg32((u32 __iomem *)(reg), data);
> - } else {
> - wr_reg32((u32 __iomem *)(reg), data >> 32);
> - wr_reg32((u32 __iomem *)(reg) + 1, data);
> - }
> -}
> -
> -static inline u64 rd_reg64(void __iomem *reg)
> -{
> - if (!caam_imx && caam_little_end)
> - return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
> - (u64)rd_reg32((u32 __iomem *)(reg)));
> -
> - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
> - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
> -}
> -#endif /* CONFIG_64BIT  */
> -
>  static inline u64 cpu_to_caam_dma64(dma_addr_t value)
>  {
>   if (caam_imx)
> 


[PATCH v2 3/4] crypto: caam - save Era in driver's private data

2017-12-19 Thread Horia Geantă
Save Era in driver's private data for further usage,
like deciding whether an erratum applies or a feature is available
based on its value.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/ctrl.c   | 4 +++-
 drivers/crypto/caam/intern.h | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
 
+   ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
 
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
-caam_get_era());
+ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
 ctrlpriv->total_jobrs, ctrlpriv->qi_present,
 caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present;  /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en;/* Virtualization enabled in CAAM */
+   int era;/* CAAM Era (internal HW revision) */
 
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
-- 
2.12.0.264.gd6db3f216544



[PATCH v2 4/4] crypto: caam - add Derived Key Protocol (DKP) support

2017-12-19 Thread Horia Geantă
Offload split key generation in CAAM engine, using DKP.
DKP is supported starting with Era 6.

Note that the way assoclen is transmitted from the job descriptor
to the shared descriptor changes - DPOVRD register is used instead
of MATH3 (where available), since DKP protocol thrashes the MATH
registers.

The replacement of MDHA split key generation with DKP has the side
effect of the crypto engine writing the authentication key, and thus
the DMA mapping direction for the buffer holding the key has to change
from DMA_TO_DEVICE to DMA_BIDIRECTIONAL.
There are two cases:
-key is inlined in descriptor - descriptor buffer mapping changes
-key is referenced - key buffer mapping changes

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
v2: fix DMA mapping direction for buffers holding the split key
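
As an illustration of the mapping-direction change described above, the
context-setup side could look roughly like this (a sketch only, not the exact
hunk from this patch; ctx/priv field names are shown as assumptions):

	/* DKP (Era >= 6) makes CAAM write the split key back, so the
	 * buffer holding it must be mapped bidirectionally; older parts
	 * keep DMA_TO_DEVICE.
	 */
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma))
		return -ENOMEM;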

 drivers/crypto/caam/caamalg.c  | 112 ---
 drivers/crypto/caam/caamalg_desc.c | 176 ++---
 drivers/crypto/caam/caamalg_desc.h |  10 +--
 drivers/crypto/caam/caamalg_qi.c   |  54 +---
 drivers/crypto/caam/caamhash.c |  73 ++-
 drivers/crypto/caam/desc.h |  29 ++
 drivers/crypto/caam/desc_constr.h  |  41 +
 drivers/crypto/caam/key_gen.c  |  30 ---
 drivers/crypto/caam/key_gen.h  |  30 +++
 9 files changed, 384 insertions(+), 171 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index f5666e50c1e7..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+   enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+   cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+   ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-  desc_bytes(desc), DMA_TO_DEVICE);
+  desc_bytes(desc), ctx->dir);
 
/*
 * Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_aead_null_decap(desc, >adata, ctx->authsize);
+   cnstr_shdsc_aead_null_decap(desc, >adata, ctx->authsize,
+   ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-  desc_bytes(desc), DMA_TO_DEVICE);
+  desc_bytes(desc), ctx->dir);
 
return 0;
 }
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
   ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
-  false);
+  false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-  desc_bytes(desc), DMA_TO_DEVICE);
+  desc_bytes(desc), ctx->dir);
 
 skip_enc:
/*
@@ -266,9 +271,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
   ctx->authsize, alg->caam.geniv, is_rfc3686,
-  nonce, ctx1_iv_off, false);
+  nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-  desc_bytes(desc), DMA_TO_DEVICE);
+  desc_bytes(des

[PATCH v2 1/4] crypto: caam - constify key data

2017-12-19 Thread Horia Geantă
Key data is not modified, it is copied in the shared descriptor.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg_desc.c |  6 +++---
 drivers/crypto/caam/desc_constr.h  | 10 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_desc.c 
b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..2db9e85bf81c 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1075,7 +1075,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, 
struct alginfo *cdata,
 
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
-   u8 *nonce = cdata->key_virt + cdata->keylen;
+   const u8 *nonce = cdata->key_virt + cdata->keylen;
 
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
   LDST_CLASS_IND_CCB |
@@ -1140,7 +1140,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, 
struct alginfo *cdata,
 
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
-   u8 *nonce = cdata->key_virt + cdata->keylen;
+   const u8 *nonce = cdata->key_virt + cdata->keylen;
 
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
   LDST_CLASS_IND_CCB |
@@ -1209,7 +1209,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, 
struct alginfo *cdata,
 
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
-   u8 *nonce = cdata->key_virt + cdata->keylen;
+   const u8 *nonce = cdata->key_virt + cdata->keylen;
 
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
   LDST_CLASS_IND_CCB |
diff --git a/drivers/crypto/caam/desc_constr.h 
b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..5b39b7d7a47a 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, 
dma_addr_t ptr,
append_ptr(desc, ptr);
 }
 
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
 {
u32 *offset = desc_end(desc);
 
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, 
dma_addr_t ptr,
append_cmd(desc, len);
 }
 
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
   u32 command)
 {
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
 APPEND_SEQ_PTR_INTLEN(out, OUT)
 
 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
 unsigned int len, u32 options) \
 { \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
  * from length of immediate data provided, e.g., split keys
  */
 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
 unsigned int data_len, \
 unsigned int len, u32 options) \
 { \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
-   void *key_virt;
+   const void *key_virt;
};
bool key_inline;
 };
-- 
2.12.0.264.gd6db3f216544



[PATCH v2 2/4] crypto: caam - remove needless ablkcipher key copy

2017-12-19 Thread Horia Geantă
ablkcipher shared descriptors are relatively small, thus there is enough
space for the key to be inlined.
Accordingly, there is no need to copy the key in ctx->key.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg.c| 8 ++--
 drivers/crypto/caam/caamalg_qi.c | 8 ++--
 2 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..f5666e50c1e7 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -625,7 +625,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
 (strstr(alg_name, "rfc3686") != NULL));
 
-   memcpy(ctx->key, key, keylen);
 #ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
   DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +647,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
 
-   dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
-   ctx->cdata.key_virt = ctx->key;
+   ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
 
/* ablkcipher_encrypt shared descriptor */
@@ -691,10 +689,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
return -EINVAL;
}
 
-   memcpy(ctx->key, key, keylen);
-   dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
-   ctx->cdata.key_virt = ctx->key;
+   ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
 
/* xts_ablkcipher_encrypt shared descriptor */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index ad14b69a052e..b45401786530 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -272,7 +272,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
 
-   memcpy(ctx->key, key, keylen);
 #ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
   DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +294,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
 
-   dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
-   ctx->cdata.key_virt = ctx->key;
+   ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
 
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +354,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher,
return -EINVAL;
}
 
-   memcpy(ctx->key, key, keylen);
-   dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
-   ctx->cdata.key_virt = ctx->key;
+   ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
 
/* xts ablkcipher encrypt, decrypt shared descriptors */
-- 
2.12.0.264.gd6db3f216544



Re: [PATCH 00/18] crypto: talitos - fixes and performance improvement

2017-12-08 Thread Horia Geantă
On 10/12/2017 6:20 PM, Herbert Xu wrote:
> On Fri, Oct 06, 2017 at 03:04:31PM +0200, Christophe Leroy wrote:
>> This serie fixes and improves the talitos crypto driver.
>>
>> First 6 patchs are fixes of failures reported by the new tests in the
>> kernel crypto test manager.
>>
Looks like these fixes are required also on older 4.9+ -stable kernels.
(I haven't seen them in the latest 4.9.68-stable mail from Greg, even though
they are in the main tree.)

In case you agree, what would be the recommended way to add the patches
to -stable?

Thanks,
Horia

>> The 8 following patches are cleanups and simplifications.
>>
>> The last 4 ones are performance improvement. The main improvement is
>> in the one before the last, it divides by 2 the time needed for a md5
>> hash on the SEC1.
>>
>> Christophe Leroy (18):
>>   crypto: talitos - fix AEAD test failures
>>   crypto: talitos - fix memory corruption on SEC2
>>   crypto: talitos - fix setkey to check key weakness
>>   crypto: talitos - fix AEAD for sha224 on non sha224 capable chips
>>   crypto: talitos - fix use of sg_link_tbl_len
>>   crypto: talitos - fix ctr-aes-talitos
>>   crypto: talitos - zeroize the descriptor with memset()
>>   crypto: talitos - declare local functions static
>>   crypto: talitos - use devm_kmalloc()
>>   crypto: talitos - use of_property_read_u32()
>>   crypto: talitos - use devm_ioremap()
>>   crypto: talitos - don't check the number of channels at each interrupt
>>   crypto: talitos - remove to_talitos_ptr_len()
>>   crypto: talitos - simplify tests in ipsec_esp()
>>   crypto: talitos - DMA map key in setkey()
>>   crypto: talitos - do hw_context DMA mapping outside the requests
>>   crypto: talitos - chain in buffered data for ahash on SEC1
>>   crypto: talitos - avoid useless copy
> 
> All applied.  Thanks.
> 


[PATCH 4.9-stable] Revert "crypto: caam - get rid of tasklet"

2017-12-05 Thread Horia Geantă
commit 2b163b5bce04546da72617bfb6c8bf07a45c4b17 upstream.

This reverts commit 66d2e2028091a074aa1290d2eeda5ddb1a6c329c.

Quoting from Russell's findings:
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg21136.html

[quote]
Okay, I've re-tested, using a different way of measuring, because using
openssl speed is impractical for off-loaded engines.  I've decided to
use this way to measure the performance:

dd if=/dev/zero bs=1048576 count=128 | /usr/bin/time openssl dgst -md5

For the threaded IRQs case gives:

0.05user 2.74system 0:05.30elapsed 52%CPU (0avgtext+0avgdata 2400maxresident)k
0.06user 2.52system 0:05.18elapsed 49%CPU (0avgtext+0avgdata 2404maxresident)k
0.12user 2.60system 0:05.61elapsed 48%CPU (0avgtext+0avgdata 2460maxresident)k
=> 5.36s => 25.0MB/s

and the tasklet case:

0.08user 2.53system 0:04.83elapsed 54%CPU (0avgtext+0avgdata 2468maxresident)k
0.09user 2.47system 0:05.16elapsed 49%CPU (0avgtext+0avgdata 2368maxresident)k
0.10user 2.51system 0:04.87elapsed 53%CPU (0avgtext+0avgdata 2460maxresident)k
=> 4.95 => 27.1MB/s

which corresponds to an 8% slowdown for the threaded IRQ case.  So,
tasklets are indeed faster than threaded IRQs.

[...]

I think I've proven from the above that this patch needs to be reverted
due to the performance regression, and that there _is_ most definitely
a detrimental effect of switching from tasklets to threaded IRQs.
[/quote]

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
Signed-off-by: Herbert Xu <herb...@gondor.apana.org.au>
---

Mihai Ordean reported soft lockups at IPsec ESP high rates on i.MX6Q,
on kernels 4.9.{35,36}.
This patch, cherry-picked from 4.10, fixes the issue.
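
For reference, the quoted throughput figures follow from the 128 MiB piped
into openssl divided by the mean elapsed times (decimal MB assumed):

\[
\frac{128 \cdot 2^{20}\,\mathrm{B}}{5.36\,\mathrm{s}} \approx 25.0\,\mathrm{MB/s}, \qquad
\frac{128 \cdot 2^{20}\,\mathrm{B}}{4.95\,\mathrm{s}} \approx 27.1\,\mathrm{MB/s}, \qquad
1 - \frac{25.0}{27.1} \approx 8\%
\]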

 drivers/crypto/caam/intern.h |  1 +
 drivers/crypto/caam/jr.c | 25 -
 2 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c05074a5c..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device   *dev;
int ridx;
struct caam_job_ring __iomem *rregs;/* JobR's register space */
+   struct tasklet_struct irqtask;
int irq;/* One per queue */
 
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f9953d..9e7f28122bb7 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
 
ret = caam_reset_hw_jr(dev);
 
+   tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
 
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 
/*
 * Check the output ring for ready responses, kick
-* the threaded irq if jobs done.
+* tasklet if jobs done.
 */
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
 
-   return IRQ_WAKE_THREAD;
+   preempt_disable();
+   tasklet_schedule(&jrp->irqtask);
+   preempt_enable();
+
+   return IRQ_HANDLED;
 }
 
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
 {
int hw_idx, sw_idx, i, head, tail;
-   struct device *dev = st_dev;
+   struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
 
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
-   return IRQ_HANDLED;
 }
 
 /**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
 
jrp = dev_get_drvdata(dev);
 
+   tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
-   error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
-caam_jr_threadirq, IRQF_SHARED,
-dev_name(dev), dev);
+   error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+   dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ static int caam_jr_init(struct device *dev)
 out_free_irq:
free_irq(jrp->

Re: [1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-29 Thread Horia Geantă
On 11/29/2017 8:28 AM, Herbert Xu wrote:
> On Tue, Oct 10, 2017 at 01:21:59PM +0300, Robert Baronescu wrote:
>> In case buffer length is a multiple of PAGE_SIZE,
>> the S/G table is incorrectly generated.
>> Fix this by handling buflen = k * PAGE_SIZE separately.
>>
>> Signed-off-by: Robert Baronescu 
> 
> Patch applied.  Thanks.
> 
Thanks Herbert.

Considering this fixes the crash reported by Tudor:
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg29172.html
I think it should be merged in this release cycle (v4.15).

Horia


[PATCH] crypto: caam/qi - use correct print specifier for size_t

2017-11-28 Thread Horia Geantă
Fix below warnings on ARMv7 by using %zu for printing size_t values:

drivers/crypto/caam/caamalg_qi.c: In function aead_edesc_alloc:
drivers/crypto/caam/caamalg_qi.c:417:17: warning: format %lu expects argument 
of type long unsigned int, but argument 4 has type unsigned int [-Wformat=]
   sizeof(struct qm_sg_entry))
 ^
drivers/crypto/caam/caamalg_qi.c:672:16: note: in expansion of macro 
CAAM_QI_MAX_AEAD_SG
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
^
drivers/crypto/caam/caamalg_qi.c: In function ablkcipher_edesc_alloc:
drivers/crypto/caam/caamalg_qi.c:440:17: warning: format %lu expects argument 
of type long unsigned int, but argument 4 has type unsigned int [-Wformat=]
   sizeof(struct qm_sg_entry))
 ^
drivers/crypto/caam/caamalg_qi.c:909:16: note: in expansion of macro 
CAAM_QI_MAX_ABLKCIPHER_SG
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
^
drivers/crypto/caam/caamalg_qi.c: In function ablkcipher_giv_edesc_alloc:
drivers/crypto/caam/caamalg_qi.c:440:17: warning: format %lu expects argument 
of type long unsigned int, but argument 4 has type unsigned int [-Wformat=]
   sizeof(struct qm_sg_entry))
 ^
drivers/crypto/caam/caamalg_qi.c:1062:16: note: in expansion of macro 
CAAM_QI_MAX_ABLKCIPHER_SG
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
^

Fixes: eb9ba37dc15a ("crypto: caam/qi - handle large number of S/Gs case")
Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f9f08fce4356..ad14b69a052e 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -668,7 +668,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-   dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+   dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
   iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +905,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-   dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+   dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
   iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1058,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
 
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-   dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+   dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
   iv_dma, ivsize, GIVENCRYPT, 0, 0);
-- 
2.12.0.264.gd6db3f216544
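
A side note on why %zu is the right conversion: sizeof() yields size_t,
which is 'unsigned int' on 32-bit ARM but 'unsigned long' on 64-bit
targets, so only the z length modifier matches everywhere. A tiny,
hypothetical illustration (not code from the patch):

#include <linux/kernel.h>       /* pr_info() */
#include <linux/types.h>        /* size_t, u64 */

static void print_size_example(void)
{
        size_t n = sizeof(u64);

        pr_info("size: %zu\n", n);      /* matches size_t on every build */
        pr_info("size: %lu\n", n);      /* triggers -Wformat on ARMv7 */
}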



Re: [PATCH 2/2] crypto: caam - add Derived Key Protocol (DKP) support

2017-11-23 Thread Horia Geantă
On 11/10/2017 4:34 PM, Horia Geantă wrote:
> Offload split key generation in CAAM engine, using DKP.
> DKP is supported starting with Era 6.
> 
> Note that the way assoclen is transmitted from the job descriptor
> to the shared descriptor changes - DPOVRD register is used instead
> of MATH3 (where available), since DKP protocol thrashes the MATH
> registers.
> 
> Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
When CAAM Job Ring platform devices are configured to go through ARM
SMMU, errors are reported:
arm-smmu 500.iommu: Unhandled context fault: fsr=0x402,
iova=0xffef9000, fsynr=0x3, cb=1

This is due to an incorrect DMA mapping direction for the buffers where
keys are stored: it has to change from DMA_TO_DEVICE to DMA_BIDIRECTIONAL,
since with DKP the keys are initially written by the CPU and then
overwritten by the crypto engine.

Will follow up with v2 shortly.

Thanks,
Horia
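
For illustration, the kind of mapping the v2 will switch to - a rough
sketch with hypothetical names, not the actual v2 diff:

#include <linux/dma-mapping.h>

/*
 * Sketch only: a key buffer that the CPU fills and that the DKP job later
 * overwrites must be mapped DMA_BIDIRECTIONAL, not DMA_TO_DEVICE.
 */
static dma_addr_t map_dkp_key(struct device *dev, void *key_buf, size_t len)
{
        dma_addr_t key_dma;

        key_dma = dma_map_single(dev, key_buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, key_dma))
                return 0;       /* caller treats 0 as "mapping failed" */

        /* CPU has written the key; hand ownership to the device */
        dma_sync_single_for_device(dev, key_dma, len, DMA_BIDIRECTIONAL);

        return key_dma;
}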


Re: [PATCH] crypto: tcrypt - set assoc in sg_init_aead()

2017-11-15 Thread Horia Geantă
On 11/14/2017 4:59 PM, Tudor Ambarus wrote:
> Results better code readability.
 ^^ *in* better
> 
> Signed-off-by: Tudor Ambarus <tudor.amba...@microchip.com>
Reviewed-by: Horia Geantă <horia.gea...@nxp.com>

Horia


Re: [PATCH 1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-14 Thread Horia Geantă
On 11/13/2017 8:28 PM, Tudor Ambarus wrote:
> Hi,
> 
> On 11/12/2017 06:26 PM, Horia Geantă wrote:
> 
>> -sg[0] - (1 entry) reserved for associated data, filled outside
>> sg_init_aead()
> 
> Let's fill the sg[0] with aad inside sg_init_aead()!
> 
This could be done; however, I would not mix fixes with improvements.

Thanks,
Horia


Re: [PATCH 1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-14 Thread Horia Geantă
On 11/13/2017 8:24 PM, Tudor Ambarus wrote:
> Hi,
> 
> On 10/10/2017 01:21 PM, Robert Baronescu wrote:
>> In case buffer length is a multiple of PAGE_SIZE,
>> the S/G table is incorrectly generated.
>> Fix this by handling buflen = k * PAGE_SIZE separately.
>>
>> Signed-off-by: Robert Baronescu 
>> ---
>>   crypto/tcrypt.c | 6 --
>>   1 file changed, 4 insertions(+), 2 deletions(-)
> 
> This patch fixes the segmentation fault listed below. The NULL
> dereference can be seen starting with:
> 7aacbfc crypto: tcrypt - fix buffer lengths in test_aead_speed()
> 
Right, the order of the two fixes is unfortunate in the sense that the
first fix (commit 7aacbfc) uncovers the issue you mention.

Thanks,
Horia



Re: [PATCH RESEND 1/4] crypto: caam: add caam-dma node to SEC4.0 device tree binding

2017-11-13 Thread Horia Geantă
On 11/10/2017 6:44 PM, Kim Phillips wrote:
> On Fri, 10 Nov 2017 08:02:01 +
> Radu Andrei Alexe  wrote:
[snip]
>> 2. I wanted this driver to be tracked by the dma engine team. They have
>> the right expertise to provide adequate feedback. If all the code was in 
>> the crypto directory they wouldn't know about this driver or any 
>> subsequent changes to it.
> 
> dma subsystem bits could still be put in the dma area if deemed
> necessary but I don't think it is: I see
> drivers/crypto/ccp/ccp-dmaengine.c calls dma_async_device_register for
> example.
> 
Please see previous discussion with Vinod:
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg21468.html

> What is the rationale for using the crypto h/w as a dma engine anyway?
SoCs that don't have a system DMA, e.g. LS1012A.

Horia


Re: [PATCH 1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-12 Thread Horia Geantă
On 11/10/2017 1:23 PM, Herbert Xu wrote:
> On Fri, Nov 10, 2017 at 09:17:33AM +0000, Horia Geantă wrote:
>>
>>> I must be missing something.  In the case rem == 0, let's say
>>> the original value of np is npo.  Then at the start of the loop,
>>> np = npo - 1, and at the last iteration, k = npo - 2, so we do
>> IIUC at the start of the loop np = npo (and not npo - 1), since np is no
>> longer decremented in the rem == 0 case:
>> -np--;
>> +if (rem)
>> +np--;
>>
>>>
>>> sg_set_buf(&sg[npo - 1], xbuf[npo - 2], PAGE_SIZE);
>>>
>> and accordingly last iteration is for k = npo - 1:
>>  sg_set_buf(&sg[npo], xbuf[npo - 1], PAGE_SIZE);
>>
>>> While the sg_init_table call sets the end-of-table at
>>>
>>> sg_init_table(sg, npo + 1);
>>>
>> while this marks sg[npo] as last SG table entry.
> 
> OK, we're both sort of right.  You're correct that this generates
> a valid SG list in that the number of entries match the end-of-table
> marking.
> 
> But the thing that prompted to check this patch in the first place
> is the semantics behind it.  For the case rem == 0, it means that
> buflen is a multiple of PAGE_SIZE.  In that case, the code with
> your patch will create an SG list that's one page longer than
> buflen.
> 
The SG table always has one more entry than is strictly needed for the
input data.

Let's say buflen = npo * PAGE_SIZE.
SG table generated by the code will have npo + 1 entries:
-sg[0] - (1 entry) reserved for associated data, filled outside
sg_init_aead()
-sg[1]..sg[npo] (npo entries) - input data, entries pointing to
xbuf[0]..xbuf[npo-1]

Horia
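
Putting the above layout into code - a minimal sketch of the fixed
sg_init_aead() logic (the XBUFSIZE clamp and the assoc data fill are left
out; sg[0] stays reserved for the AAD as described):

#include <linux/mm.h>           /* PAGE_SIZE */
#include <linux/scatterlist.h>

static void sg_init_aead_sketch(struct scatterlist *sg, char *xbuf[],
                                unsigned int buflen)
{
        int np = (buflen + PAGE_SIZE - 1) / PAGE_SIZE;  /* pages holding data */
        int rem = buflen % PAGE_SIZE;   /* size of the last, partial page */
        int k;

        sg_init_table(sg, np + 1);      /* +1: sg[0] is filled by the caller */

        if (rem)
                np--;                   /* only the full pages go in the loop */
        for (k = 0; k < np; k++)
                sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);

        if (rem)
                sg_set_buf(&sg[k + 1], xbuf[k], rem);   /* trailing partial page */
}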


[PATCH 2/2] crypto: caam - add Derived Key Protocol (DKP) support

2017-11-10 Thread Horia Geantă
Offload split key generation in CAAM engine, using DKP.
DKP is supported starting with Era 6.

Note that the way assoclen is transmitted from the job descriptor
to the shared descriptor changes - DPOVRD register is used instead
of MATH3 (where available), since DKP protocol thrashes the MATH
registers.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamalg.c  |  52 +--
 drivers/crypto/caam/caamalg_desc.c | 176 ++---
 drivers/crypto/caam/caamalg_desc.h |  10 +--
 drivers/crypto/caam/caamalg_qi.c   |  31 ++-
 drivers/crypto/caam/caamhash.c |  56 
 drivers/crypto/caam/desc.h |  29 ++
 drivers/crypto/caam/desc_constr.h  |  41 +
 drivers/crypto/caam/key_gen.c  |  30 ---
 drivers/crypto/caam/key_gen.h  |  30 +++
 9 files changed, 323 insertions(+), 132 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..700dc09b80da 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -118,6 +118,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,7 +137,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-   cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+   cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+   ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), DMA_TO_DEVICE);
 
@@ -154,7 +156,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-   cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+   cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+   ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), DMA_TO_DEVICE);
 
@@ -168,6 +171,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,7 +238,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
   ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
-  false);
+  false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), DMA_TO_DEVICE);
 
@@ -266,7 +270,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
   ctx->authsize, alg->caam.geniv, is_rfc3686,
-  nonce, ctx1_iv_off, false);
+  nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
   desc_bytes(desc), DMA_TO_DEVICE);
 
@@ -300,7 +304,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
  ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
   desc_bytes(desc), DMA_TO_DEVICE);
 
@@ -503,6 +507,7 @@ static int aead_setkey(struct crypto_aead *aead,
 {
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+   struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
 
@@ -517,6 +522,27 @@ static int aead_setkey(struct crypto_aead *aead,
   DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
+   /*
+* If DKP is supported, use it in the shared descriptor to generate
+* the spl

[PATCH 1/2] crypto: caam - save Era in driver's private data

2017-11-10 Thread Horia Geantă
Save Era in driver's private data for further usage,
like deciding whether an erratum applies or a feature is available
based on its value.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/ctrl.c   | 4 +++-
 drivers/crypto/caam/intern.h | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
 
+   ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
 
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
-caam_get_era());
+ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
 ctrlpriv->total_jobrs, ctrlpriv->qi_present,
 caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index a52361258d3a..55aab74e7b5c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -83,6 +83,7 @@ struct caam_drv_private {
u8 qi_present;  /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en;/* Virtualization enabled in CAAM */
+   int era;/* CAAM Era (internal HW revision) */
 
 #defineRNG4_MAX_HANDLES 2
/* RNG4 block */
-- 
2.12.0.264.gd6db3f216544
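
For context, this is the kind of consumer the commit message has in mind -
a hypothetical check, mirroring how patch 2/2 gates DKP on Era 6:

        /* Hypothetical user of the new field - not part of this patch. */
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);

        if (ctrlpriv->era >= 6) {
                /* feature available (e.g. DKP): build the new descriptor */
        } else {
                /* fall back to the pre-Era-6 code path */
        }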



Re: [PATCH 1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-10 Thread Horia Geantă
On 11/10/2017 9:43 AM, Herbert Xu wrote:
> On Fri, Nov 10, 2017 at 06:37:22AM +0000, Horia Geantă wrote:
>> On 11/10/2017 12:21 AM, Herbert Xu wrote:
>>> On Thu, Nov 09, 2017 at 02:37:29PM +, Horia Geantă wrote:
>>>>
>>>>>>  sg_init_table(sg, np + 1);
>>>> sg_mark_end() marks sg[np].
>>>>
>>>>>> -np--;
>>>>>> +if (rem)
>>>>>> +np--;
>>>>>>  for (k = 0; k < np; k++)
>>>>>>  sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
>>>> In case rem == 0, last k value is np-1, thus sg[np-1+1] will be filled
>>>> here with xbuf[np-1].
>>>
>>> No, if rem == 0, then the last k value is np-2.
>>>
>> Notice that np-- above the for loop is done conditionally, so in the for
>> loop k takes values in [0, np-1].
>> This means the for loop fills sg[1]...sg[np].
> 
> I must be missing something.  In the case rem == 0, let's say
> the original value of np is npo.  Then at the start of the loop,
> np = npo - 1, and at the last iteration, k = npo - 2, so we do
IIUC at the start of the loop np = npo (and not npo - 1), since np is no
longer decremented in the rem == 0 case:
-   np--;
+   if (rem)
+   np--;

> 
>   sg_set_buf(&sg[npo - 1], xbuf[npo - 2], PAGE_SIZE);
> 
and accordingly last iteration is for k = npo - 1:
sg_set_buf(&sg[npo], xbuf[npo - 1], PAGE_SIZE);

> While the sg_init_table call sets the end-of-table at
> 
>   sg_init_table(sg, npo + 1);
> 
while this marks sg[npo] as last SG table entry.

Thanks,
Horia


Re: [PATCH 1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-09 Thread Horia Geantă
On 11/10/2017 12:21 AM, Herbert Xu wrote:
> On Thu, Nov 09, 2017 at 02:37:29PM +0000, Horia Geantă wrote:
>>
>>>>sg_init_table(sg, np + 1);
>> sg_mark_end() marks sg[np].
>>
>>>> -  np--;
>>>> +  if (rem)
>>>> +  np--;
>>>>for (k = 0; k < np; k++)
>>>>sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
>> In case rem == 0, last k value is np-1, thus sg[np-1+1] will be filled
>> here with xbuf[np-1].
> 
> No, if rem == 0, then the last k value is np-2.
> 
Notice that np-- above the for loop is done conditionally, so in the for
loop k takes values in [0, np-1].
This means the for loop fills sg[1]...sg[np].

Thanks,
Horia


Re: [PATCH 1/2] crypto: tcrypt - fix S/G table for test_aead_speed()

2017-11-09 Thread Horia Geantă
On 11/3/2017 2:42 PM, Herbert Xu wrote:
> On Tue, Oct 10, 2017 at 01:21:59PM +0300, Robert Baronescu wrote:
>> In case buffer length is a multiple of PAGE_SIZE,
>> the S/G table is incorrectly generated.
>> Fix this by handling buflen = k * PAGE_SIZE separately.
>>
>> Signed-off-by: Robert Baronescu 
>> ---
>>  crypto/tcrypt.c | 6 --
>>  1 file changed, 4 insertions(+), 2 deletions(-)
>>
>> diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
>> index 0022a18..bd9b66c 100644
>> --- a/crypto/tcrypt.c
>> +++ b/crypto/tcrypt.c
>> @@ -221,11 +221,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
>>  }
>>  
>>  sg_init_table(sg, np + 1);
sg_mark_end() marks sg[np].

>> -np--;
>> +if (rem)
>> +np--;
>>  for (k = 0; k < np; k++)
>>  sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
In case rem == 0, last k value is np-1, thus sg[np-1+1] will be filled
here with xbuf[np-1].

>>  
>> -sg_set_buf(&sg[k + 1], xbuf[k], rem);
>> +if (rem)
>> +sg_set_buf(&sg[k + 1], xbuf[k], rem);
In case rem !=0, sg[np] will be filled here with xbuf[np-1].

> 
> Sorry but I think this is still buggy because you have not moved the
> end-of-table marking in the rem == 0 case.
IIUC this is correct, see above comments.
Could you please take a look again?

Thanks,
Horia


Re: [PATCH v2 1/3] staging: ccree: copy IV to DMAable memory

2017-11-08 Thread Horia Geantă
On 11/2/2017 10:14 AM, Gilad Ben-Yossef wrote:
> We are being passed an IV buffer from unknown origin, which may be
> stack allocated and thus not safe for DMA. Allocate a DMA safe
> buffer for the IV and use that instead.
> 
IIUC this fixes only the (a)blkcipher / skcipher algorithms.
What about aead, authenc?

The fact that only the skcipher tcrypt tests use IVs on the stack doesn't
mean the aead and authenc implementations are safe - other crypto API users
could provide IVs lying in non-DMAable memory.

To reiterate, the proper approach is to fix the crypto API to guarantee
IVs are DMAable.
However Herbert suggests he is not willing to do this work:
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg28821.html

A few high-level details on what this implies would be helpful,
in case somebody else decides it's worth pursuing this path.

The compromise is to fix all crypto drivers that need DMAable IVs.
IMHO this is suboptimal, both in terms of performance (memory
allocation, memcpy) and in terms of added code complexity.

Horia
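
For reference, the per-driver compromise boils down to something like the
following in each affected driver (a rough sketch, not ccree's actual code;
GFP_ATOMIC may be needed depending on the calling context):

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Copy the caller's IV (possibly on the stack) into DMA-safe memory. */
static int map_iv_dma_safe(struct device *dev, const u8 *iv,
                           unsigned int ivsize, u8 **iv_buf,
                           dma_addr_t *iv_dma)
{
        *iv_buf = kmemdup(iv, ivsize, GFP_KERNEL);
        if (!*iv_buf)
                return -ENOMEM;

        *iv_dma = dma_map_single(dev, *iv_buf, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *iv_dma)) {
                kfree(*iv_buf);
                return -ENOMEM;
        }

        return 0;       /* caller unmaps and kfrees once the request completes */
}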



[PATCH 2/2] crypto: caam - remove unused param of ctx_map_to_sec4_sg()

2017-11-01 Thread Horia Geantă
The ctx_map_to_sec4_sg() function, added in
commit 045e36780f115 ("crypto: caam - ahash hmac support"),
has never used the "desc" parameter, so let's drop it.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamhash.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 27fe1a07050c..400e788b4f1c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -211,7 +211,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
 }
 
 /* Map state->caam_ctx, and add it to link table */
-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
 struct caam_hash_state *state, int ctx_len,
 struct sec4_sg_entry *sec4_sg, u32 flag)
 {
@@ -722,7 +722,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
 
-   ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+   ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
 edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -821,7 +821,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 
edesc->sec4_sg_bytes = sec4_sg_bytes;
 
-   ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+   ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
 edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -915,7 +915,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
edesc->src_nents = src_nents;
 
-   ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+   ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
 edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
-- 
2.12.0.264.gd6db3f216544



[PATCH 1/2] crypto: caam - remove unneeded edesc zeroization

2017-11-01 Thread Horia Geantă
Extended descriptor allocation has been changed by
commit dde20ae9d6383 ("crypto: caam - Change kmalloc to kzalloc to avoid 
residual data")
to provide zeroized memory, meaning we no longer have to sanitize
its members - edesc->src_nents and edesc->dst_dma.

Signed-off-by: Horia Geantă <horia.gea...@nxp.com>
---
 drivers/crypto/caam/caamhash.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f1bf563cb85b..27fe1a07050c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -820,7 +820,6 @@ static int ahash_final_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
 
edesc->sec4_sg_bytes = sec4_sg_bytes;
-   edesc->src_nents = 0;
 
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
 edesc->sec4_sg, DMA_TO_DEVICE);
@@ -1072,7 +1071,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
-   edesc->src_nents = 0;
 
 #ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1154,7 +1152,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
-   edesc->dst_dma = 0;
 
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
@@ -1366,7 +1363,6 @@ static int ahash_update_first(struct ahash_request *req)
}
 
edesc->src_nents = src_nents;
-   edesc->dst_dma = 0;
 
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
  to_hash);
-- 
2.12.0.264.gd6db3f216544



Re: [PATCH] crypto: testmgr: don't allocate IV on stack

2017-10-31 Thread Horia Geantă
On 10/31/2017 10:00 AM, Herbert Xu wrote:
> On Tue, Oct 31, 2017 at 07:56:26AM +, Gilad Ben-Yossef wrote:
>> The IV was allocated on the stack in testmgr skcipher tests.
>> Since HW based tfm providers need to DMA the IV to the HW,
>> this leads to problems and is detected by the DMA-API debug
>> code.
>>
>> Fix it by allocating the IV using kmalloc instead.
>>
>> Signed-off-by: Gilad Ben-Yossef 
Gilad, you're not the only one who bumped into this issue:
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg23074.html
not to mention previous patches that have been accepted:
96692a7305c4 crypto: tcrypt - do not allocate iv on stack for aead speed
tests
9bac019dad80 crypto: testmgr - Fix DMA-API warning
and so on.

> 
> The driver that is mapping the IV directly should be fixed instead.
> Only input that is given in the form of SG lists can be mapped.
> Everything else should be copied if they need to go over DMA.
> 
Herbert, wouldn't it make more sense to follow your previous suggestion:
"Perhaps we should change the API so that it gets passed in as an
SG list."
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg23082.html

Thanks,
Horia


[PATCH RESEND 3/4] crypto: caam: add functionality used by the caam_dma driver

2017-10-30 Thread Horia Geantă
From: Radu Alexe 

The caam_dma is a memcpy DMA driver based on the DMA functionality of
the CAAM hardware block. It creates a DMA channel for each JR of the
CAAM. This patch adds functionality that is used by the caam_dma that is
not yet part of the JR driver.

Signed-off-by: Radu Alexe 
---
 drivers/crypto/caam/desc.h |  3 +++
 drivers/crypto/caam/jr.c   | 42 ++
 drivers/crypto/caam/jr.h   |  2 ++
 3 files changed, 47 insertions(+)

diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 2e6766a1573f..f03221d2468a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -354,6 +354,7 @@
 #define FIFOLD_TYPE_PK_N   (0x08 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_PK_A   (0x0c << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_PK_B   (0x0d << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IFIFO  (0x0f << FIFOLD_TYPE_SHIFT)
 
 /* Other types. Need to OR in last/flush bits as desired */
 #define FIFOLD_TYPE_MSG_MASK   (0x38 << FIFOLD_TYPE_SHIFT)
@@ -407,6 +408,7 @@
 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_RNGSTORE    (0x34 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA    (0x3e << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_SKIP(0x3f << FIFOST_TYPE_SHIFT)
 
 /*
@@ -1443,6 +1445,7 @@
 #define MATH_SRC1_INFIFO   (0x0a << MATH_SRC1_SHIFT)
 #define MATH_SRC1_OUTFIFO  (0x0b << MATH_SRC1_SHIFT)
 #define MATH_SRC1_ONE  (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
 
 /* Destination selectors */
 #define MATH_DEST_SHIFT         8
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..00e87094588d 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -23,6 +23,14 @@ struct jr_driver_data {
 
 static struct jr_driver_data driver_data;
 
+static int jr_driver_probed;
+
+int caam_jr_driver_probed(void)
+{
+   return jr_driver_probed;
+}
+EXPORT_SYMBOL(caam_jr_driver_probed);
+
 static int caam_reset_hw_jr(struct device *dev)
 {
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -119,6 +127,8 @@ static int caam_jr_remove(struct platform_device *pdev)
dev_err(jrdev, "Failed to shut down job ring\n");
irq_dispose_mapping(jrpriv->irq);
 
+   jr_driver_probed--;
+
return ret;
 }
 
@@ -280,6 +290,36 @@ struct device *caam_jr_alloc(void)
 }
 EXPORT_SYMBOL(caam_jr_alloc);
 
+/**
+ * caam_jridx_alloc() - Alloc a specific job ring based on its index.
+ *
+ * returns :  pointer to the newly allocated physical
+ *   JobR dev can be written to if successful.
+ **/
+struct device *caam_jridx_alloc(int idx)
+{
+   struct caam_drv_private_jr *jrpriv;
+   struct device *dev = ERR_PTR(-ENODEV);
+
+   spin_lock(&driver_data.jr_alloc_lock);
+
+   if (list_empty(&driver_data.jr_list))
+   goto end;
+
+   list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+   if (jrpriv->ridx == idx) {
+   atomic_inc(&jrpriv->tfm_count);
+   dev = jrpriv->dev;
+   break;
+   }
+   }
+
+end:
+   spin_unlock(&driver_data.jr_alloc_lock);
+   return dev;
+}
+EXPORT_SYMBOL(caam_jridx_alloc);
+
 /**
  * caam_jr_free() - Free the Job Ring
  * @rdev - points to the dev that identifies the Job ring to
@@ -538,6 +578,8 @@ static int caam_jr_probe(struct platform_device *pdev)
 
atomic_set(&jrpriv->tfm_count, 0);
 
+   jr_driver_probed++;
+
return 0;
 }
 
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 97113a6d6c58..ee4d31c9aeb8 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,7 +8,9 @@
 #define JR_H
 
 /* Prototypes for backend-level services exposed to APIs */
+int caam_jr_driver_probed(void);
 struct device *caam_jr_alloc(void);
+struct device *caam_jridx_alloc(int idx);
 void caam_jr_free(struct device *rdev);
 int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
-- 
2.14.2.606.g7451fcd
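
For context, a caller such as the caam_dma driver from patch 4/4 would use
the new helpers roughly as below (hypothetical function and error handling,
not code from this series):

#include <linux/err.h>
#include "jr.h"

static int bind_channel_to_jr(int ridx)
{
        struct device *jrdev;

        if (!caam_jr_driver_probed())   /* JR driver not bound yet: defer */
                return -EPROBE_DEFER;

        jrdev = caam_jridx_alloc(ridx); /* grab the job ring with this index */
        if (IS_ERR(jrdev))
                return PTR_ERR(jrdev);

        /*
         * ... register a dma_chan backed by jrdev and enqueue memcpy jobs
         * on it via caam_jr_enqueue() ...
         */

        caam_jr_free(jrdev);            /* drop the reference when done */
        return 0;
}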



[PATCH RESEND 4/4] dma: caam: add dma memcpy driver

2017-10-30 Thread Horia Geantă
From: Radu Alexe 

This module introduces a memcpy DMA driver based on the DMA capabilities
of the CAAM hardware block. CAAM DMA is a platform driver that is only
probed if the device is defined in the device tree. The driver creates
a DMA channel for each JR of the CAAM. This introduces a dependency on
the JR driver. Therefore a deferring mechanism was used to ensure that
the CAAM DMA driver is probed only after the JR driver.

Signed-off-by: Radu Alexe 
Signed-off-by: Tudor Ambarus 
Signed-off-by: Rajiv Vishwakarma 
---
 drivers/dma/Kconfig|  17 ++
 drivers/dma/Makefile   |   1 +
 drivers/dma/caam_dma.c | 444 +
 3 files changed, 462 insertions(+)
 create mode 100644 drivers/dma/caam_dma.c

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fadc4d8783bd..0df48307dac1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -600,6 +600,23 @@ config ZX_DMA
help
  Support the DMA engine for ZTE ZX family platform devices.
 
+config CRYPTO_DEV_FSL_CAAM_DMA
+   tristate "CAAM DMA engine support"
+   depends on CRYPTO_DEV_FSL_CAAM_JR
+   default y
+   select DMA_ENGINE
+   select ASYNC_CORE
+   select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+   help
+ Selecting this will offload the DMA operations for users of
+ the scatter gather memcopy API to the CAAM via job rings. The
+ CAAM is a hardware module that provides hardware acceleration to
+ cryptographic operations. It has a built-in DMA controller that can
+ be programmed to read/write cryptographic data. This module defines
+ a DMA driver that uses the DMA capabilities of the CAAM.
+
+ To compile this as a module, choose M here: the module
+ will be called caam_dma.
 
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f08f8de1b567..37563454d624 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
 
 obj-y += qcom/
 obj-y += xilinx/
diff --git a/drivers/dma/caam_dma.c b/drivers/dma/caam_dma.c
new file mode 100644
index ..dfd5409864b0
--- /dev/null
+++ b/drivers/dma/caam_dma.c
@@ -0,0 +1,444 @@
+/*
+ * caam support for SG DMA
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc
+ * Copyright 2017 NXP
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include "dmaengine.h"
+
+#include "../crypto/caam/regs.h"
+#include "../crypto/caam/jr.h"
+#include "../crypto/caam/error.h"
+#include "../crypto/caam/intern.h"
+#include "../crypto/caam/desc_constr.h"
+
+#define DESC_DMA_MEMCPY_LEN    ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
+                                CAAM_CMD_SZ)
+
+/* This is max chunk size of a DMA transfer. If a buffer is larger than this
+ * value it is internally broken into chunks of max CAAM_DMA_CHUNK_SIZE bytes
+ * and for each chunk a DMA transfer request is issued.
+ * This value is the largest number on 16 bits that is a multiple of 256 bytes
+ * (the largest configurable CAAM DMA burst size).
+ */
+#define CAAM_DMA_CHUNK_SIZE    65280
+
+struct caam_dma_sh_desc {
+   u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
+   dma_addr_t desc_dma;
+};
+
+/* caam dma extended descriptor */
+struct caam_dma_edesc {
+   struct dma_async_tx_descriptor async_tx;
+   struct list_head node;
+   struct caam_dma_ctx *ctx;
+   dma_addr_t src_dma;
+   dma_addr_t dst_dma;
+   unsigned int src_len;
+   unsigned int dst_len;
+   u32 jd[] ____cacheline_aligned;
+};
+
+/*
+ * caam_dma_ctx - per jr/channel context
+ * @chan: dma channel used by async_tx API
+ * @node: list_head used to attach to the global dma_ctx_list
+ * @jrdev: Job Ring device
+ * @submit_q: queue of pending (submitted, but not enqueued) jobs
+ * @done_not_acked: jobs that have been completed by jr, but maybe not acked
+ * @edesc_lock: protects extended descriptor
+ */
+struct caam_dma_ctx {
+   struct dma_chan chan;
+   struct list_head node;
+   struct device *jrdev;
+   struct list_head submit_q;
+   struct list_head done_not_acked;
+   spinlock_t edesc_lock;
+};
+
+static struct dma_device *dma_dev;
+static struct caam_dma_sh_desc *dma_sh_desc;
+static LIST_HEAD(dma_ctx_list);
+
+static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+   struct caam_dma_edesc *edesc = NULL;
+   struct caam_dma_ctx *ctx = NULL;
+   dma_cookie_t cookie;
+
+   edesc = container_of(tx, struct caam_dma_edesc, async_tx);
+   ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
+
+   spin_lock_bh(&ctx->edesc_lock);
+
+  
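
For reference, a client of a memcpy-capable dmaengine driver like this one
goes through the generic dmaengine API; a minimal sketch with hypothetical,
already DMA-mapped src/dst buffers (not code from this series):

#include <linux/dmaengine.h>

static int memcpy_via_dmaengine(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        chan = dma_request_channel(mask, NULL, NULL);   /* any memcpy channel */
        if (!chan)
                return -ENODEV;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx) {
                dma_release_channel(chan);
                return -EIO;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        dma_sync_wait(chan, cookie);    /* wait until the copy completes */

        dma_release_channel(chan);
        return 0;
}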

[PATCH RESEND 2/4] arm64: dts: ls1012a: add caam-dma node

2017-10-30 Thread Horia Geantă
From: Radu Alexe 

Signed-off-by: Radu Alexe 
---
 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi 
b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index df83915d6ea6..f92ecf381cb1 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -197,6 +197,12 @@
interrupts = ;
};
 
+   caam-dma {
+   compatible = "fsl,sec-v5.4-dma",
+"fsl,sec-v5.0-dma",
+"fsl,sec-v4.0-dma";
+   };
+
rtic@6 {
compatible = "fsl,sec-v5.4-rtic",
 "fsl,sec-v5.0-rtic",
-- 
2.14.2.606.g7451fcd



[PATCH RESEND 1/4] crypto: caam: add caam-dma node to SEC4.0 device tree binding

2017-10-30 Thread Horia Geantă
From: Radu Alexe 

Signed-off-by: Radu Alexe 
---
 .../devicetree/bindings/crypto/fsl-sec4.txt | 21 +
 1 file changed, 21 insertions(+)

diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt 
b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 7aef0eae58d4..97b37c15d793 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -6,6 +6,7 @@ Copyright (C) 2008-2011 Freescale Semiconductor Inc.
-Overview
-SEC 4 Node
-Job Ring Node
+   -CAAM DMA Node
-Run Time Integrity Check (RTIC) Node
-Run Time Integrity Check (RTIC) Memory Node
-Secure Non-Volatile Storage (SNVS) Node
@@ -215,6 +216,26 @@ EXAMPLE
interrupts = <88 2>;
};
 
+=
+CAAM DMA Node
+
+Child node of the crypto node that enables the use of the DMA capabilities
+of the CAAM by a stand-alone driver. The only required property is the
+"compatible" property. All the other properties are determined from
+the job rings on which the CAAM DMA driver depends (ex: the number of
+dma-channels is equal to the number of defined job rings).
+
+  - compatible
+  Usage: required
+  Value type: <string>
+  Definition: Must include "fsl,sec-v4.0-dma"
+
+EXAMPLE
+  caam-dma {
+compatible = "fsl,sec-v5.4-dma",
+ "fsl,sec-v5.0-dma",
+ "fsl,sec-v4.0-dma";
+  }
 
 =
 Run Time Integrity Check (RTIC) Node
-- 
2.14.2.606.g7451fcd
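
For completeness, a platform driver binding to this node would match on the
least specific compatible, roughly as below (hypothetical table; the real
driver lives in patch 4/4 of this series):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

/* Hypothetical match table: "fsl,sec-v4.0-dma" also covers the v5.x nodes,
 * since they list it as a fallback compatible. */
static const struct of_device_id caam_dma_of_match[] = {
        { .compatible = "fsl,sec-v4.0-dma" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, caam_dma_of_match);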



[PATCH RESEND 0/4] add CAAM DMA memcpy driver

2017-10-30 Thread Horia Geantă
From: Radu Alexe 

This patch-set introduces a new DMA memcpy driver based on the DMA
capabilities of the CAAM crypto engine. Because of this dependency the
included commits target various parts of the kernel tree.

Patch 1.
Since the CAAM DMA driver is a platform driver it is enabled by a new node
in the device tree. This commit adds documentation for the device tree
bindings.

Patch 2.
This patch adds the "caam-dma" node in the fsl-ls1012a.dtsi file.

Patch 3.
This commit adds functionality to the CAAM JR driver that is used by the
CAAM DMA driver.

Patch 4.
Adds the CAAM DMA memcpy driver.

Patches 1 and 3 should be acked by the crypto maintainers, patch 2 by the
devicetree maintainers and patch 4 by the DMA maintainers.
The intent is to take all the patches in through the dmaengine tree.

Radu Alexe (4):
  crypto: caam: add caam-dma node to SEC4.0 device tree binding
  arm64: dts: ls1012a: add caam-dma node
  crypto: caam: add functionality used by the caam_dma driver
  dma: caam: add dma memcpy driver

 .../devicetree/bindings/crypto/fsl-sec4.txt|  21 +
 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi |   6 +
 drivers/crypto/caam/desc.h |   3 +
 drivers/crypto/caam/jr.c   |  42 ++
 drivers/crypto/caam/jr.h   |   2 +
 drivers/dma/Kconfig|  17 +
 drivers/dma/Makefile   |   1 +
 drivers/dma/caam_dma.c | 444 +
 8 files changed, 536 insertions(+)
 create mode 100644 drivers/dma/caam_dma.c

-- 
2.14.2.606.g7451fcd


