[Patch v3 7/7] crypto: qce: aead: Schedule fallback algorithm

2021-04-19 Thread Thara Gopinath
Qualcomm crypto engine does not handle the following scenarios and
will issue an abort. In such cases, pass on the transformation to
a fallback algorithm.

- DES3 algorithms with all three keys the same.
- AES192 algorithms.
- 0 length messages.

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Updated crypto_aead_set_reqsize to include the size of fallback
  request as well.
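
For context, a minimal sketch of the init-time side this reqsize change refers
to (the function shape below is an assumption for illustration, not a quote
from the patch hunks): allocate the fallback tfm with CRYPTO_ALG_NEED_FALLBACK
and size the request context so that rctx->fallback_req fits behind the
driver's own reqctx.

static int qce_aead_init(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->need_fallback = false;

	/* ask the crypto API for an implementation to fall back to */
	ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
					  0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* reserve room for the fallback request behind our own reqctx */
	crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) +
				     crypto_aead_reqsize(ctx->fallback));
	return 0;
}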

 drivers/crypto/qce/aead.c | 64 ---
 drivers/crypto/qce/aead.h |  3 ++
 2 files changed, 56 insertions(+), 11 deletions(-)

diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index ef66ae21eae3..6d06a19b48e4 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -512,7 +512,23 @@ static int qce_aead_crypt(struct aead_request *req, int 
encrypt)
/* CE does not handle 0 length messages */
if (!rctx->cryptlen) {
if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
-   return -EINVAL;
+   ctx->need_fallback = true;
+   }
+
+   /* If fallback is needed, schedule and exit */
+   if (ctx->need_fallback) {
+   /* Reset need_fallback in case the same ctx is used for another transaction */
+   ctx->need_fallback = false;
+
+   aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+   aead_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+   aead_request_set_crypt(&rctx->fallback_req, req->src,
+  req->dst, req->cryptlen, req->iv);
+   aead_request_set_ad(&rctx->fallback_req, req->assoclen);
+
+   return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
+crypto_aead_decrypt(&rctx->fallback_req);
}
 
/*
@@ -553,7 +569,7 @@ static int qce_aead_ccm_setkey(struct crypto_aead *tfm, 
const u8 *key,
memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
}
 
-   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
+   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
return -EINVAL;
 
ctx->enc_keylen = keylen;
@@ -562,7 +578,12 @@ static int qce_aead_ccm_setkey(struct crypto_aead *tfm, 
const u8 *key,
memcpy(ctx->enc_key, key, keylen);
memcpy(ctx->auth_key, key, keylen);
 
-   return 0;
+   if (keylen == AES_KEYSIZE_192)
+   ctx->need_fallback = true;
+
+   return IS_CCM_RFC4309(flags) ?
+   crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
+   crypto_aead_setkey(ctx->fallback, key, keylen);
 }
 
 static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned 
int keylen)
@@ -593,20 +614,21 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const 
u8 *key, unsigned int
 * The crypto engine does not support any two keys
 * being the same for triple des algorithms. The
 * verify_skcipher_des3_key does not check for all the
-* below conditions. Return -EINVAL in case any two keys
-* are the same. Revisit to see if a fallback cipher
-* is needed to handle this condition.
+* below conditions. Schedule fallback in this case.
 */
memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
!((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
!((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
-   return -EINVAL;
+   ctx->need_fallback = true;
} else if (IS_AES(flags)) {
/* No random key sizes */
if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
+   authenc_keys.enckeylen != AES_KEYSIZE_192 &&
authenc_keys.enckeylen != AES_KEYSIZE_256)
return -EINVAL;
+   if (authenc_keys.enckeylen == AES_KEYSIZE_192)
+   ctx->need_fallback = true;
}
 
ctx->enc_keylen = authenc_keys.enckeylen;
@@ -617,7 +639,7 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const 
u8 *key, unsigned int
memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
 
-   return 0;
+   return crypto_aead_setkey(ctx->fallback, key, keylen);
 }
 
 static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -632,15 +654,33 @@ static int qce_aead_setauthsize(struct crypto_aead *tfm, 
unsigned int authsize)
  

[Patch v3 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-19 Thread Thara Gopinath
Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---

v2->v3:
- Made qce_be32_to_cpu_array truly be32 to cpu endian by using be32_to_cpup
  instead of cpu_to_be32p. Also removed the (u32 *) typecasting of arrays
  obtained as output from qce_be32_to_cpu_array as per Bjorn's review comments.
- Wrapped the newly introduced std_iv_sha1, std_iv_sha256 and
  qce_be32_to_cpu_array in CONFIG_CRYPTO_DEV_QCE_AEAD to prevent W=1 warnings
  as reported by the kernel test robot.

v1->v2:
- Minor fixes like removing unneeded initialization of variables
  and using bool values in lieu of 0 and 1 as pointed out by Bjorn.
- Introduced qce_be32_to_cpu_array, which converts a u8 string in big
  endian order to an array of u32 and returns the total number of words,
  as per Bjorn's review comments. Presently this function is used only by
  qce_setup_regs_aead to format keys, iv and nonce. The cipher and hash
  algorithms can be made to use this function in a separate clean-up patch.
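
As a side note on the helper introduced above: be32_to_cpup() and
cpu_to_be32p() perform the same byte swap on little endian hosts; only the
types differ. A hedged one-liner of what the new helper does per word (this
mirrors the loop body in the diff below):

	/* src points at big endian key/iv/nonce bytes; the result is a
	 * host-order u32 ready to be handed to qce_write_array() */
	u32 word = be32_to_cpup((const __be32 *)src);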

 drivers/crypto/qce/common.c | 162 +++-
 1 file changed, 160 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7b3d6caec1b2..6d6b3792323b 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,7 @@
 #include "core.h"
 #include "regs-v5.h"
 #include "sha.h"
+#include "aead.h"
 
 static inline u32 qce_read(struct qce_device *qce, u32 offset)
 {
@@ -96,7 +97,7 @@ static inline void qce_crypto_go(struct qce_device *qce, bool 
result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
 }
 
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
 static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
 {
u32 cfg = 0;
@@ -139,7 +140,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
 
return cfg;
 }
+#endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +228,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
 }
 #endif
 
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
 static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
 {
u32 cfg = 0;
@@ -271,7 +274,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
 
return cfg;
 }
+#endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
 {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +391,155 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
 }
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
+{
+   u32 *d = dst;
+   const u8 *s = src;
+   unsigned int n;
+
+   n = len / sizeof(u32);
+   for (; n > 0; n--) {
+   *d = be32_to_cpup((const __be32 *)s);
+   s += sizeof(u32);
+   d++;
+   }
+   return DIV_ROUND_UP(len, sizeof(u32));
+}
+
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+   u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
+   u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
+   u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
+   u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg, auth_cfg, config, totallen;
+   u32 iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key *

[Patch v3 5/7] crypto: qce: common: Clean up qce_auth_cfg

2021-04-19 Thread Thara Gopinath
Remove various redundant checks in qce_auth_cfg. Also allow qce_auth_cfg
to take auth_size as a parameter, which is a required setting for ccm(aes)
algorithms.
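
As a small illustration of the new parameter (assumed values, not taken from
the patch itself): for ccm(aes) with a 16-byte ICV the config word would carry

	cfg |= (16 - 1) << AUTH_SIZE_SHIFT;

while the fixed AUTH_SIZE_SHA1/SHA256/ENUM_16_BYTES encodings continue to
cover the hash and CMAC cases.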

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 21 +
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7b5bc5a6ae81..7b3d6caec1b2 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -97,11 +97,11 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
 }
 
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
-static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
 {
u32 cfg = 0;
 
-   if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+   if (IS_CCM(flags) || IS_CMAC(flags))
cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
else
cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
@@ -119,15 +119,16 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+   else if (IS_CCM(flags))
+   cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;
 
if (IS_SHA1(flags) || IS_SHA256(flags))
cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
-   else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
-IS_CBC(flags) || IS_CTR(flags))
+   else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
-   else if (IS_AES(flags) && IS_CCM(flags))
+   else if (IS_CCM(flags))
cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
-   else if (IS_AES(flags) && IS_CMAC(flags))
+   else if (IS_CMAC(flags))
cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
 
if (IS_SHA(flags) || IS_SHA_HMAC(flags))
@@ -136,10 +137,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
if (IS_CCM(flags))
cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
 
-   if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
-   IS_CMAC(flags))
-   cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
-
return cfg;
 }
 
@@ -171,7 +168,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
qce_clear_array(qce, REG_AUTH_KEY0, 16);
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
 
-   auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+   auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, 
digestsize);
}
 
if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
@@ -199,7 +196,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
qce_write_array(qce, REG_AUTH_BYTECNT0,
(u32 *)rctx->byte_count, 2);
 
-   auth_cfg = qce_auth_cfg(rctx->flags, 0);
+   auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);
 
if (rctx->last_blk)
auth_cfg |= BIT(AUTH_LAST_SHIFT);
-- 
2.25.1



[Patch v3 2/7] crypto: qce: common: Make result dump optional

2021-04-19 Thread Thara Gopinath
The Qualcomm crypto engine allows the IV registers and status register
to be concatenated to the output. This option is enabled by setting the
RESULTS_DUMP field in the GOPROC register. This is useful for most of the
algorithms, either to retrieve the status of the operation or, in the case
of authentication algorithms, to retrieve the mac. But for ccm
algorithms, the mac is part of the output stream and is not retrieved
from the IV registers, so a separate buffer is needed to retrieve it.
Make enabling the RESULTS_DUMP field optional so that algorithms can choose
whether or not to enable the option.
Note that in this patch, the enabled algorithms always choose
RESULTS_DUMP to be enabled. But later, with the introduction of ccm
algorithms, this changes.
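
Illustrative only (this call site is not part of this patch): once the ccm
paths land, the AEAD code can request the result dump conditionally, along
the lines of

	/* ccm retrieves the MAC from the output stream, so skip RESULTS_DUMP */
	qce_crypto_go(qce, !IS_CCM(rctx->flags));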

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dd76175d5c62..7b5bc5a6ae81 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -88,9 +88,12 @@ static void qce_setup_config(struct qce_device *qce)
qce_write(qce, REG_CONFIG, config);
 }
 
-static inline void qce_crypto_go(struct qce_device *qce)
+static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
 {
-   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+   if (result_dump)
+   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | 
BIT(RESULTS_DUMP_SHIFT));
+   else
+   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
 }
 
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
@@ -219,7 +222,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
 
-   qce_crypto_go(qce);
+   qce_crypto_go(qce, true);
 
return 0;
 }
@@ -380,7 +383,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
 
-   qce_crypto_go(qce);
+   qce_crypto_go(qce, true);
 
return 0;
 }
-- 
2.25.1



[Patch v3 4/7] crypto: qce: Add support for AEAD algorithms

2021-04-19 Thread Thara Gopinath
Introduce support to enable the following algorithms in the Qualcomm Crypto Engine.

- authenc(hmac(sha1),cbc(des))
- authenc(hmac(sha1),cbc(des3_ede))
- authenc(hmac(sha256),cbc(des))
- authenc(hmac(sha256),cbc(des3_ede))
- authenc(hmac(sha256),cbc(aes))
- ccm(aes)
- rfc4309(ccm(aes))

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Removed spurious totallen from qce_aead_async_req_handle since
  it was unused, as pointed out by Hubert.
- Updated qce_dma_prep_sgs to use the #nents returned by dma_map_sg rather
  than using the precomputed #nents.


 drivers/crypto/Kconfig  |  15 +
 drivers/crypto/qce/Makefile |   1 +
 drivers/crypto/qce/aead.c   | 799 
 drivers/crypto/qce/aead.h   |  53 +++
 drivers/crypto/qce/common.h |   2 +
 drivers/crypto/qce/core.c   |   4 +
 6 files changed, 874 insertions(+)
 create mode 100644 drivers/crypto/qce/aead.c
 create mode 100644 drivers/crypto/qce/aead.h

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9a4c275a1335..1fe5b7eafc02 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -627,6 +627,12 @@ config CRYPTO_DEV_QCE_SHA
select CRYPTO_SHA1
select CRYPTO_SHA256
 
+config CRYPTO_DEV_QCE_AEAD
+   bool
+   depends on CRYPTO_DEV_QCE
+   select CRYPTO_AUTHENC
+   select CRYPTO_LIB_DES
+
 choice
prompt "Algorithms enabled for QCE acceleration"
default CRYPTO_DEV_QCE_ENABLE_ALL
@@ -647,6 +653,7 @@ choice
bool "All supported algorithms"
select CRYPTO_DEV_QCE_SKCIPHER
select CRYPTO_DEV_QCE_SHA
+   select CRYPTO_DEV_QCE_AEAD
help
  Enable all supported algorithms:
- AES (CBC, CTR, ECB, XTS)
@@ -672,6 +679,14 @@ choice
- SHA1, HMAC-SHA1
- SHA256, HMAC-SHA256
 
+   config CRYPTO_DEV_QCE_ENABLE_AEAD
+   bool "AEAD algorithms only"
+   select CRYPTO_DEV_QCE_AEAD
+   help
+ Enable AEAD algorithms only:
+   - authenc()
+   - ccm(aes)
+   - rfc4309(ccm(aes))
 endchoice
 
 config CRYPTO_DEV_QCE_SW_MAX_LEN
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 14ade8a7d664..2cf8984e1b85 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -6,3 +6,4 @@ qcrypto-objs := core.o \
 
 qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
 qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_AEAD) += aead.o
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
new file mode 100644
index ..ef66ae21eae3
--- /dev/null
+++ b/drivers/crypto/qce/aead.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "aead.h"
+
+#define CCM_NONCE_ADATA_SHIFT  6
+#define CCM_NONCE_AUTHSIZE_SHIFT   3
+#define MAX_CCM_ADATA_HEADER_LEN6
+
+static LIST_HEAD(aead_algs);
+
+static void qce_aead_done(void *data)
+{
+   struct crypto_async_request *async_req = data;
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   struct qce_result_dump *result_buf = qce->dma.result_buf;
+   enum dma_data_direction dir_src, dir_dst;
+   bool diff_dst;
+   int error;
+   u32 status;
+   unsigned int totallen;
+   unsigned char tag[SHA256_DIGEST_SIZE] = {0};
+   int ret = 0;
+
+   diff_dst = (req->src != req->dst) ? true : false;
+   dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+   dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+   error = qce_dma_terminate_all(&qce->dma);
+   if (error)
+   dev_dbg(qce->dev, "aead dma termination error (%d)\n",
+   error);
+   if (diff_dst)
+   dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+
+   dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+   if (IS_CCM(rctx->flags)) {
+   if (req->assoclen) {
+   sg_free_table(&rctx->src_tbl);
+   if (diff_dst)
+   sg_free_table(&rctx->dst_tbl);
+   } else {
+   if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
+   sg_free_table(&rctx->dst_tbl);
+   }
+   } else {
+  

[Patch v3 3/7] crypto: qce: Add mode for rfc4309

2021-04-19 Thread Thara Gopinath
rfc4309 is the specification that uses AES CCM algorithms with IPsec
security packets. Add a submode to identify the rfc4309 ccm(aes) algorithm
in the crypto driver.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---

v1->v2:
- Moved up the QCE_ENCRYPT and QCE_DECRYPT bit positions so that the
  addition of other algorithms in the future will not affect these
  macros.
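
A hedged illustration of how the new submode composes with the existing CCM
bit (the helper name below is hypothetical, used only to show intent):

	unsigned long flags = QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309;

	/* IS_CCM() and IS_CCM_RFC4309() both hold for such a template, so
	 * rfc4309-only handling (e.g. the 3-byte salt) can key off the extra
	 * bit without disturbing the plain ccm(aes) path. */
	if (IS_CCM(flags) && IS_CCM_RFC4309(flags))
		do_rfc4309_specific_setup();	/* hypothetical helper */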

 drivers/crypto/qce/common.h | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 3bc244bcca2d..b135440bf72b 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -51,9 +51,11 @@
 #define QCE_MODE_CCM   BIT(12)
 #define QCE_MODE_MASK  GENMASK(12, 8)
 
+#define QCE_MODE_CCM_RFC4309   BIT(13)
+
 /* cipher encryption/decryption operations */
-#define QCE_ENCRYPTBIT(13)
-#define QCE_DECRYPTBIT(14)
+#define QCE_ENCRYPTBIT(30)
+#define QCE_DECRYPTBIT(31)
 
 #define IS_DES(flags)  (flags & QCE_ALG_DES)
 #define IS_3DES(flags) (flags & QCE_ALG_3DES)
@@ -73,6 +75,7 @@
 #define IS_CTR(mode)   (mode & QCE_MODE_CTR)
 #define IS_XTS(mode)   (mode & QCE_MODE_XTS)
 #define IS_CCM(mode)   (mode & QCE_MODE_CCM)
+#define IS_CCM_RFC4309(mode)   ((mode) & QCE_MODE_CCM_RFC4309)
 
 #define IS_ENCRYPT(dir)(dir & QCE_ENCRYPT)
 #define IS_DECRYPT(dir)(dir & QCE_DECRYPT)
-- 
2.25.1



[Patch v3 0/7] Add support for AEAD algorithms in Qualcomm Crypto Engine driver

2021-04-19 Thread Thara Gopinath
Enable support for AEAD algorithms in the Qualcomm CE driver.  The first three
patches in this series are cleanups and add a few missing pieces required
to add support for AEAD algorithms.  Patch 4 introduces the supported AEAD
transformations on Qualcomm CE.  Patches 5 and 6 implement the h/w
infrastructure needed to enable and run the AEAD transformations on
Qualcomm CE.  Patch 7 adds support to queue fallback algorithms in case of
unsupported special inputs.

This patch series has been tested with the in-kernel crypto testing module
tcrypt.ko, with fuzz tests enabled as well.

Thara Gopinath (7):
  crypto: qce: common: Add MAC failed error checking
  crypto: qce: common: Make result dump optional
  crypto: qce: Add mode for rfc4309
  crypto: qce: Add support for AEAD algorithms
  crypto: qce: common: Clean up qce_auth_cfg
  crypto: qce: common: Add support for AEAD algorithms
  crypto: qce: aead: Schedule fallback algorithm

 drivers/crypto/Kconfig  |  15 +
 drivers/crypto/qce/Makefile |   1 +
 drivers/crypto/qce/aead.c   | 841 
 drivers/crypto/qce/aead.h   |  56 +++
 drivers/crypto/qce/common.c | 196 -
 drivers/crypto/qce/common.h |   9 +-
 drivers/crypto/qce/core.c   |   4 +
 7 files changed, 1102 insertions(+), 20 deletions(-)
 create mode 100644 drivers/crypto/qce/aead.c
 create mode 100644 drivers/crypto/qce/aead.h

-- 
2.25.1



[Patch v3 1/7] crypto: qce: common: Add MAC failed error checking

2021-04-19 Thread Thara Gopinath
MAC_FAILED gets set in the status register if authentication fails
for ccm algorithms (during decryption). Add support to catch and flag
this error.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---

v1->v2:
- Split the error checking for -ENXIO and -EBADMSG into if-else clause
  so that the code is more readable as per Bjorn's review comment.

 drivers/crypto/qce/common.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dceb9579d87a..dd76175d5c62 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -419,6 +419,8 @@ int qce_check_status(struct qce_device *qce, u32 *status)
 */
if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
ret = -ENXIO;
+   else if (*status & BIT(MAC_FAILED_SHIFT))
+   ret = -EBADMSG;
 
return ret;
 }
-- 
2.25.1



Re: [PATCH 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-17 Thread Thara Gopinath




On 4/13/21 7:09 PM, Bjorn Andersson wrote:

On Tue 13 Apr 17:44 CDT 2021, Thara Gopinath wrote:


[..]



Yes, given that you just typecast things as you do it should just work
to move the typecast to the qce_cpu_to_be32p_array().

But as I said, this would indicate that what is cpu_to_be32() should
have been be32_to_cpu() (both performs the same swap, it's just a matter
of what type goes in and what type goes out).

Looking at the other uses of qce_cpu_to_be32p_array() I suspect that
it's the same situation there, so perhaps introduce a new function
qce_be32_to_cpu() in this patchset (that returns number of words
converted) and then look into the existing users after that?


Hi!

I have sent out the v2 with the new function. To me, it does look 
cleaner. So thanks!




[..]

+   if (IS_SHA_HMAC(rctx->flags)) {
+   /* Write default authentication iv */
+   if (IS_SHA1_HMAC(rctx->flags)) {
+   auth_ivsize = SHA1_DIGEST_SIZE;
+   memcpy(authiv, std_iv_sha1, auth_ivsize);
+   } else if (IS_SHA256_HMAC(rctx->flags)) {
+   auth_ivsize = SHA256_DIGEST_SIZE;
+   memcpy(authiv, std_iv_sha256, auth_ivsize);
+   }
+   authiv_words = auth_ivsize / sizeof(u32);
+   qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);


AUTH_IV0 is affected by the little endian configuration, does this imply
that IS_SHA_HMAC() and IS_CCM() are exclusive bits of rctx->flags? If so
I think it would be nice if you grouped the conditionals in a way that
made that obvious when reading the function.


So yes IS_SHA_HMAC() and IS_CCM() are exclusive bits of rctx->flags.
AUTH_IVn is 0 for ccm and has initial value for HMAC algorithms. I don't
understand the confusion here.



I'm just saying that writing is as below would have made it obvious to
me that IS_SHA_HMAC() and IS_CCM() are exclusive:


So regardless of the mode, it is a good idea to clear the IV registers,
which happens above in

qce_clear_array(qce, REG_AUTH_IV0, 16);

This is important because the size of the IV varies between HMAC(SHA1) and
HMAC(SHA256) and we don't want any previous bits sticking around.
For CCM, after the above step we don't do anything with AUTH_IV, whereas for
SHA_HMAC we have to go and program the registers. I can split it into

if (IS_SHA_HMAC(flags)) {
...
} else {
...
}

but both snippets will have the above line of code clearing the IV register and
the if part will have more stuff actually programming these registers. Is
that what you are looking for?


I didn't find an answer quickly to the question of whether the two were
mutually exclusive and couldn't determine it from the code flow either. But
my comment seems to stem from my misunderstanding that the little endian
bit was dependent on IS_CCM().

That said, if the logic really is "do this for IS_SHA_HMAC(), otherwise
do that", then if else makes sense.


So, the logic really is: do the "clearing of IV" in all cases, and do the
setting of the initial IV only for HMAC. I tried the if..else like you said.
It did not look correct to duplicate the code clearing the IV. So I have added
comments around this section, hopefully making it clearer. Do take a look
and let me know. I can rework it further if you think so.
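
The flow being described is roughly this (a sketch assembled from the
snippets quoted above, not the exact hunk):

	/* always clear AUTH_IV0..15 first; the IV length differs between
	 * HMAC(SHA1) and HMAC(SHA256) and stale words must not survive */
	qce_clear_array(qce, REG_AUTH_IV0, 16);

	if (IS_SHA_HMAC(rctx->flags)) {
		/* program the standard initial digest value for HMAC */
		if (IS_SHA1_HMAC(rctx->flags)) {
			auth_ivsize = SHA1_DIGEST_SIZE;
			memcpy(authiv, std_iv_sha1, auth_ivsize);
		} else {
			auth_ivsize = SHA256_DIGEST_SIZE;
			memcpy(authiv, std_iv_sha256, auth_ivsize);
		}
		authiv_words = auth_ivsize / sizeof(u32);
		qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
	}
	/* for CCM nothing more is written; AUTH_IVn stays zero */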




Regards,
Bjorn



--
Warm Regards
Thara


[Patch v2 5/7] crypto: qce: common: Clean up qce_auth_cfg

2021-04-17 Thread Thara Gopinath
Remove various redundant checks in qce_auth_cfg. Also allow qce_auth_cfg
to take auth_size as a parameter, which is a required setting for ccm(aes)
algorithms.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 21 +
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7b5bc5a6ae81..7b3d6caec1b2 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -97,11 +97,11 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
 }
 
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
-static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
 {
u32 cfg = 0;
 
-   if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+   if (IS_CCM(flags) || IS_CMAC(flags))
cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
else
cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
@@ -119,15 +119,16 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+   else if (IS_CCM(flags))
+   cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;
 
if (IS_SHA1(flags) || IS_SHA256(flags))
cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
-   else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
-IS_CBC(flags) || IS_CTR(flags))
+   else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
-   else if (IS_AES(flags) && IS_CCM(flags))
+   else if (IS_CCM(flags))
cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
-   else if (IS_AES(flags) && IS_CMAC(flags))
+   else if (IS_CMAC(flags))
cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
 
if (IS_SHA(flags) || IS_SHA_HMAC(flags))
@@ -136,10 +137,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
if (IS_CCM(flags))
cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
 
-   if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
-   IS_CMAC(flags))
-   cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
-
return cfg;
 }
 
@@ -171,7 +168,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
qce_clear_array(qce, REG_AUTH_KEY0, 16);
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
 
-   auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+   auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, 
digestsize);
}
 
if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
@@ -199,7 +196,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
qce_write_array(qce, REG_AUTH_BYTECNT0,
(u32 *)rctx->byte_count, 2);
 
-   auth_cfg = qce_auth_cfg(rctx->flags, 0);
+   auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);
 
if (rctx->last_blk)
auth_cfg |= BIT(AUTH_LAST_SHIFT);
-- 
2.25.1



[Patch v2 7/7] crypto: qce: aead: Schedule fallback algorithm

2021-04-17 Thread Thara Gopinath
Qualcomm crypto engine does not handle the following scenarios and
will issue an abort. In such cases, pass on the transformation to
a fallback algorithm.

- DES3 algorithms with all three keys the same.
- AES192 algorithms.
- 0 length messages.

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Updated crypto_aead_set_reqsize to include the size of fallback
  request as well.

 drivers/crypto/qce/aead.c | 64 ---
 drivers/crypto/qce/aead.h |  3 ++
 2 files changed, 56 insertions(+), 11 deletions(-)

diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index ef66ae21eae3..6d06a19b48e4 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -512,7 +512,23 @@ static int qce_aead_crypt(struct aead_request *req, int 
encrypt)
/* CE does not handle 0 length messages */
if (!rctx->cryptlen) {
if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
-   return -EINVAL;
+   ctx->need_fallback = true;
+   }
+
+   /* If fallback is needed, schedule and exit */
+   if (ctx->need_fallback) {
+   /* Reset need_fallback in case the same ctx is used for another transaction */
+   ctx->need_fallback = false;
+
+   aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+   aead_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+   aead_request_set_crypt(&rctx->fallback_req, req->src,
+  req->dst, req->cryptlen, req->iv);
+   aead_request_set_ad(&rctx->fallback_req, req->assoclen);
+
+   return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
+crypto_aead_decrypt(&rctx->fallback_req);
}
 
/*
@@ -553,7 +569,7 @@ static int qce_aead_ccm_setkey(struct crypto_aead *tfm, 
const u8 *key,
memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
}
 
-   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
+   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
return -EINVAL;
 
ctx->enc_keylen = keylen;
@@ -562,7 +578,12 @@ static int qce_aead_ccm_setkey(struct crypto_aead *tfm, 
const u8 *key,
memcpy(ctx->enc_key, key, keylen);
memcpy(ctx->auth_key, key, keylen);
 
-   return 0;
+   if (keylen == AES_KEYSIZE_192)
+   ctx->need_fallback = true;
+
+   return IS_CCM_RFC4309(flags) ?
+   crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
+   crypto_aead_setkey(ctx->fallback, key, keylen);
 }
 
 static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned 
int keylen)
@@ -593,20 +614,21 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const 
u8 *key, unsigned int
 * The crypto engine does not support any two keys
 * being the same for triple des algorithms. The
 * verify_skcipher_des3_key does not check for all the
-* below conditions. Return -EINVAL in case any two keys
-* are the same. Revisit to see if a fallback cipher
-* is needed to handle this condition.
+* below conditions. Schedule fallback in this case.
 */
memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
!((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
!((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
-   return -EINVAL;
+   ctx->need_fallback = true;
} else if (IS_AES(flags)) {
/* No random key sizes */
if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
+   authenc_keys.enckeylen != AES_KEYSIZE_192 &&
authenc_keys.enckeylen != AES_KEYSIZE_256)
return -EINVAL;
+   if (authenc_keys.enckeylen == AES_KEYSIZE_192)
+   ctx->need_fallback = true;
}
 
ctx->enc_keylen = authenc_keys.enckeylen;
@@ -617,7 +639,7 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const 
u8 *key, unsigned int
memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
 
-   return 0;
+   return crypto_aead_setkey(ctx->fallback, key, keylen);
 }
 
 static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -632,15 +654,33 @@ static int qce_aead_setauthsize(struct crypto_aead *tfm, 
unsigned int authsize)
  

[Patch v2 4/7] crypto: qce: Add support for AEAD algorithms

2021-04-17 Thread Thara Gopinath
Introduce support to enable the following algorithms in the Qualcomm Crypto Engine.

- authenc(hmac(sha1),cbc(des))
- authenc(hmac(sha1),cbc(des3_ede))
- authenc(hmac(sha256),cbc(des))
- authenc(hmac(sha256),cbc(des3_ede))
- authenc(hmac(sha256),cbc(aes))
- ccm(aes)
- rfc4309(ccm(aes))

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Removed spurious totallen from qce_aead_async_req_handle since
  it was unused, as pointed out by Hubert.
- Updated qce_dma_prep_sgs to use the #nents returned by dma_map_sg rather
  than using the precomputed #nents.

 drivers/crypto/Kconfig  |  15 +
 drivers/crypto/qce/Makefile |   1 +
 drivers/crypto/qce/aead.c   | 799 
 drivers/crypto/qce/aead.h   |  53 +++
 drivers/crypto/qce/common.h |   2 +
 drivers/crypto/qce/core.c   |   4 +
 6 files changed, 874 insertions(+)
 create mode 100644 drivers/crypto/qce/aead.c
 create mode 100644 drivers/crypto/qce/aead.h

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9a4c275a1335..1fe5b7eafc02 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -627,6 +627,12 @@ config CRYPTO_DEV_QCE_SHA
select CRYPTO_SHA1
select CRYPTO_SHA256
 
+config CRYPTO_DEV_QCE_AEAD
+   bool
+   depends on CRYPTO_DEV_QCE
+   select CRYPTO_AUTHENC
+   select CRYPTO_LIB_DES
+
 choice
prompt "Algorithms enabled for QCE acceleration"
default CRYPTO_DEV_QCE_ENABLE_ALL
@@ -647,6 +653,7 @@ choice
bool "All supported algorithms"
select CRYPTO_DEV_QCE_SKCIPHER
select CRYPTO_DEV_QCE_SHA
+   select CRYPTO_DEV_QCE_AEAD
help
  Enable all supported algorithms:
- AES (CBC, CTR, ECB, XTS)
@@ -672,6 +679,14 @@ choice
- SHA1, HMAC-SHA1
- SHA256, HMAC-SHA256
 
+   config CRYPTO_DEV_QCE_ENABLE_AEAD
+   bool "AEAD algorithms only"
+   select CRYPTO_DEV_QCE_AEAD
+   help
+ Enable AEAD algorithms only:
+   - authenc()
+   - ccm(aes)
+   - rfc4309(ccm(aes))
 endchoice
 
 config CRYPTO_DEV_QCE_SW_MAX_LEN
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 14ade8a7d664..2cf8984e1b85 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -6,3 +6,4 @@ qcrypto-objs := core.o \
 
 qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
 qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_AEAD) += aead.o
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
new file mode 100644
index ..ef66ae21eae3
--- /dev/null
+++ b/drivers/crypto/qce/aead.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "aead.h"
+
+#define CCM_NONCE_ADATA_SHIFT  6
+#define CCM_NONCE_AUTHSIZE_SHIFT   3
+#define MAX_CCM_ADATA_HEADER_LEN6
+
+static LIST_HEAD(aead_algs);
+
+static void qce_aead_done(void *data)
+{
+   struct crypto_async_request *async_req = data;
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   struct qce_result_dump *result_buf = qce->dma.result_buf;
+   enum dma_data_direction dir_src, dir_dst;
+   bool diff_dst;
+   int error;
+   u32 status;
+   unsigned int totallen;
+   unsigned char tag[SHA256_DIGEST_SIZE] = {0};
+   int ret = 0;
+
+   diff_dst = (req->src != req->dst) ? true : false;
+   dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+   dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+   error = qce_dma_terminate_all(&qce->dma);
+   if (error)
+   dev_dbg(qce->dev, "aead dma termination error (%d)\n",
+   error);
+   if (diff_dst)
+   dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+
+   dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+   if (IS_CCM(rctx->flags)) {
+   if (req->assoclen) {
+   sg_free_table(&rctx->src_tbl);
+   if (diff_dst)
+   sg_free_table(&rctx->dst_tbl);
+   } else {
+   if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
+   sg_free_table(&rctx->dst_tbl);
+   }
+   } else {
+  

[Patch v2 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-17 Thread Thara Gopinath
Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Minor fixes like removing unneeded initialization of variables
  and using bool values in lieu of 0 and 1 as pointed out by Bjorn.
- Introduced qce_be32_to_cpu_array, which converts a u8 string in big
  endian order to an array of u32 and returns the total number of words,
  as per Bjorn's review comments. Presently this function is used only by
  qce_setup_regs_aead to format keys, iv and nonce. The cipher and hash
  algorithms can be made to use this function in a separate clean-up patch.

 drivers/crypto/qce/common.c | 164 +++-
 1 file changed, 162 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7b3d6caec1b2..ffbf866842a3 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,16 @@
 #include "core.h"
 #include "regs-v5.h"
 #include "sha.h"
+#include "aead.h"
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
 
 static inline u32 qce_read(struct qce_device *qce, u32 offset)
 {
@@ -76,6 +86,21 @@ void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, 
unsigned int len)
}
 }
 
+static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
+{
+   __be32 *d = (__be32 *)dst;
+   const u8 *s = src;
+   unsigned int n;
+
+   n = len / sizeof(u32);
+   for (; n > 0; n--) {
+   *d = cpu_to_be32p((const __u32 *)s);
+   s += sizeof(u32);
+   d++;
+   }
+   return DIV_ROUND_UP(len, sizeof(u32));
+}
+
 static void qce_setup_config(struct qce_device *qce)
 {
u32 config;
@@ -96,7 +121,7 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
 }
 
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
 static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
 {
u32 cfg = 0;
@@ -139,7 +164,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
 
return cfg;
 }
+#endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +252,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
 }
 #endif
 
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
 static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
 {
u32 cfg = 0;
@@ -271,7 +298,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
 
return cfg;
 }
+#endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
 {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +415,133 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
 }
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+   u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
+   u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
+   u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
+   u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg, auth_cfg, config, totallen;
+   u32 *iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key */
+   enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
+   qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+   /* Write encryption iv */
+   enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv,

[Patch v2 3/7] crypto: qce: Add mode for rfc4309

2021-04-17 Thread Thara Gopinath
rfc4309 is the specification that uses AES CCM algorithms with IPsec
security packets. Add a submode to identify the rfc4309 ccm(aes) algorithm
in the crypto driver.

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Moved up the QCE_ENCRYPT and QCE_DECRYPT bit positions so that the
  addition of other algorithms in the future will not affect these
  macros.

 drivers/crypto/qce/common.h | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 3bc244bcca2d..b135440bf72b 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -51,9 +51,11 @@
 #define QCE_MODE_CCM   BIT(12)
 #define QCE_MODE_MASK  GENMASK(12, 8)
 
+#define QCE_MODE_CCM_RFC4309   BIT(13)
+
 /* cipher encryption/decryption operations */
-#define QCE_ENCRYPTBIT(13)
-#define QCE_DECRYPTBIT(14)
+#define QCE_ENCRYPTBIT(30)
+#define QCE_DECRYPTBIT(31)
 
 #define IS_DES(flags)  (flags & QCE_ALG_DES)
 #define IS_3DES(flags) (flags & QCE_ALG_3DES)
@@ -73,6 +75,7 @@
 #define IS_CTR(mode)   (mode & QCE_MODE_CTR)
 #define IS_XTS(mode)   (mode & QCE_MODE_XTS)
 #define IS_CCM(mode)   (mode & QCE_MODE_CCM)
+#define IS_CCM_RFC4309(mode)   ((mode) & QCE_MODE_CCM_RFC4309)
 
 #define IS_ENCRYPT(dir)(dir & QCE_ENCRYPT)
 #define IS_DECRYPT(dir)(dir & QCE_DECRYPT)
-- 
2.25.1



[Patch v2 2/7] crypto: qce: common: Make result dump optional

2021-04-17 Thread Thara Gopinath
The Qualcomm crypto engine allows the IV registers and status register
to be concatenated to the output. This option is enabled by setting the
RESULTS_DUMP field in the GOPROC register. This is useful for most of the
algorithms, either to retrieve the status of the operation or, in the case
of authentication algorithms, to retrieve the mac. But for ccm
algorithms, the mac is part of the output stream and is not retrieved
from the IV registers, so a separate buffer is needed to retrieve it.
Make enabling the RESULTS_DUMP field optional so that algorithms can choose
whether or not to enable the option.
Note that in this patch, the enabled algorithms always choose
RESULTS_DUMP to be enabled. But later, with the introduction of ccm
algorithms, this changes.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dd76175d5c62..7b5bc5a6ae81 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -88,9 +88,12 @@ static void qce_setup_config(struct qce_device *qce)
qce_write(qce, REG_CONFIG, config);
 }
 
-static inline void qce_crypto_go(struct qce_device *qce)
+static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
 {
-   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+   if (result_dump)
+   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | 
BIT(RESULTS_DUMP_SHIFT));
+   else
+   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
 }
 
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
@@ -219,7 +222,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
 
-   qce_crypto_go(qce);
+   qce_crypto_go(qce, true);
 
return 0;
 }
@@ -380,7 +383,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
 
-   qce_crypto_go(qce);
+   qce_crypto_go(qce, true);
 
return 0;
 }
-- 
2.25.1



[Patch v2 1/7] crypto: qce: common: Add MAC failed error checking

2021-04-17 Thread Thara Gopinath
MAC_FAILED gets set in the status register if authentication fails
for ccm algorithms (during decryption). Add support to catch and flag
this error.

Signed-off-by: Thara Gopinath 
---

v1->v2:
- Split the error checking for -ENXIO and -EBADMSG into if-else clause
  so that the code is more readable as per Bjorn's review comment.

 drivers/crypto/qce/common.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dceb9579d87a..dd76175d5c62 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -419,6 +419,8 @@ int qce_check_status(struct qce_device *qce, u32 *status)
 */
if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
ret = -ENXIO;
+   else if (*status & BIT(MAC_FAILED_SHIFT))
+   ret = -EBADMSG;
 
return ret;
 }
-- 
2.25.1



[Patch v2 0/7] Add support for AEAD algorithms in Qualcomm Crypto Engine driver

2021-04-17 Thread Thara Gopinath
Enable support for AEAD algorithms in the Qualcomm CE driver.  The first three
patches in this series are cleanups and add a few missing pieces required
to add support for AEAD algorithms.  Patch 4 introduces the supported AEAD
transformations on Qualcomm CE.  Patches 5 and 6 implement the h/w
infrastructure needed to enable and run the AEAD transformations on
Qualcomm CE.  Patch 7 adds support to queue fallback algorithms in case of
unsupported special inputs.

This patch series has been tested with the in-kernel crypto testing module
tcrypt.ko, with fuzz tests enabled as well.

Thara Gopinath (7):
  crypto: qce: common: Add MAC failed error checking
  crypto: qce: common: Make result dump optional
  crypto: qce: Add mode for rfc4309
  crypto: qce: Add support for AEAD algorithms
  crypto: qce: common: Clean up qce_auth_cfg
  crypto: qce: common: Add support for AEAD algorithms
  crypto: qce: aead: Schedule fallback algorithm

 drivers/crypto/Kconfig  |  15 +
 drivers/crypto/qce/Makefile |   1 +
 drivers/crypto/qce/aead.c   | 841 
 drivers/crypto/qce/aead.h   |  56 +++
 drivers/crypto/qce/common.c | 198 -
 drivers/crypto/qce/common.h |   9 +-
 drivers/crypto/qce/core.c   |   4 +
 7 files changed, 1104 insertions(+), 20 deletions(-)
 create mode 100644 drivers/crypto/qce/aead.c
 create mode 100644 drivers/crypto/qce/aead.h

-- 
2.25.1



Re: [PATCH 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-13 Thread Thara Gopinath




On 4/13/21 6:33 PM, Bjorn Andersson wrote:

On Tue 13 Apr 17:27 CDT 2021, Thara Gopinath wrote:




On 4/5/21 6:18 PM, Bjorn Andersson wrote:

On Thu 25 Feb 12:27 CST 2021, Thara Gopinath wrote:


Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---
   drivers/crypto/qce/common.c | 155 +++-
   1 file changed, 153 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 05a71c5ecf61..54d209cb0525 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,16 @@
   #include "core.h"
   #include "regs-v5.h"
   #include "sha.h"
+#include "aead.h"
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
   static inline u32 qce_read(struct qce_device *qce, u32 offset)
   {
@@ -96,7 +106,7 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
   }
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
   static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
   {
u32 cfg = 0;
@@ -139,7 +149,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
return cfg;
   }
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
   static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
   {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +237,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
   }
   #endif
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
   static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
   {
u32 cfg = 0;
@@ -271,7 +283,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
return cfg;
   }
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
   static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
   {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +400,139 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
   }
   #endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+   __be32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 authiv[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+   __be32 authnonce[QCE_MAX_NONCE / sizeof(__be32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg = 0, auth_cfg = 0, config, totallen;


I don't see any reason to initialize encr_cfg or auth_cfg.


+   u32 *iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key */
+   qce_cpu_to_be32p_array(enckey, ctx->enc_key, enc_keylen);
+   enckey_words = enc_keylen / sizeof(u32);
+   qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);


Afaict all "array registers" in this function are affected by the
CRYPTO_SETUP little endian bit, but you set this bit before launching
the operation dependent on IS_CCM(). So is this really working for the
!IS_CCM() case?


+
+   /* Write encryption iv */
+   qce_cpu_to_be32p_array(enciv, rctx->iv, enc_ivsize);
+   enciv_words = enc_ivsize / sizeof(u32);
+   qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);


It would be nice if this snippet was extracted to a helper function.


I kind of forgot to type this earlier. So yes, I agree in principle.
It is more elegant to have something like qce_convert_be32_and_write
and do the above three steps inside the function. This snippet is prevalent in
this driver code across other algos as well (skcipher and hash).
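
A hedged sketch of what such a helper could look like (the name and shape are
only what is being discussed in this thread, not merged code):

static unsigned int qce_convert_be32_and_write(struct qce_device *qce,
					       u32 offset, u32 *buf,
					       const u8 *src, unsigned int len)
{
	/* format the big endian byte string into host-order words... */
	unsigned int words = qce_be32_to_cpu_array(buf, src, len);

	/* ...and push them into the register array in one call */
	qce_write_array(qce, offset, buf, words);

	return words;
}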


Perhaps make qce_cpu_to_be32p_array() (or qce_be32_to_cpu() per the
other reply), could ret

Re: [PATCH 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-13 Thread Thara Gopinath




On 4/13/21 6:20 PM, Bjorn Andersson wrote:

On Tue 13 Apr 16:31 CDT 2021, Thara Gopinath wrote:



Hi Bjorn,

On 4/5/21 6:18 PM, Bjorn Andersson wrote:

On Thu 25 Feb 12:27 CST 2021, Thara Gopinath wrote:


Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---
   drivers/crypto/qce/common.c | 155 +++-
   1 file changed, 153 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 05a71c5ecf61..54d209cb0525 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,16 @@
   #include "core.h"
   #include "regs-v5.h"
   #include "sha.h"
+#include "aead.h"
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
   static inline u32 qce_read(struct qce_device *qce, u32 offset)
   {
@@ -96,7 +106,7 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
   }
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
   static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
   {
u32 cfg = 0;
@@ -139,7 +149,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
return cfg;
   }
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
   static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
   {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +237,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
   }
   #endif
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
   static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
   {
u32 cfg = 0;
@@ -271,7 +283,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
return cfg;
   }
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
   static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
   {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +400,139 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
   }
   #endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+   __be32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 authiv[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+   __be32 authnonce[QCE_MAX_NONCE / sizeof(__be32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg = 0, auth_cfg = 0, config, totallen;


I don't see any reason to initialize encr_cfg or auth_cfg.


right.. I will remove it




+   u32 *iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key */
+   qce_cpu_to_be32p_array(enckey, ctx->enc_key, enc_keylen);
+   enckey_words = enc_keylen / sizeof(u32);
+   qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);


Afaict all "array registers" in this function are affected by the
CRYPTO_SETUP little endian bit, but you set this bit before launching
the operation dependent on IS_CCM(). So is this really working for the
!IS_CCM() case?


I am not sure I understand you. Below,

/* get little endianness */
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);

is outside of any checks.



You're right, I misread that snippet as I was jumping through the
function. So we're unconditionally running the hardware in little endian
mode.




+
+   /* Write encryption iv */
+   qce_cpu_to_be32p_array(enciv, rctx->iv, enc_ivsize);
+   enciv_words = enc_ivsize / sizeof(u32);
+   qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);


It would be nice if this snippet was extracted to a helper fun

Re: [PATCH 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-13 Thread Thara Gopinath




On 4/5/21 6:18 PM, Bjorn Andersson wrote:

On Thu 25 Feb 12:27 CST 2021, Thara Gopinath wrote:


Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---
  drivers/crypto/qce/common.c | 155 +++-
  1 file changed, 153 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 05a71c5ecf61..54d209cb0525 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,16 @@
  #include "core.h"
  #include "regs-v5.h"
  #include "sha.h"
+#include "aead.h"
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
  
  static inline u32 qce_read(struct qce_device *qce, u32 offset)

  {
@@ -96,7 +106,7 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
  }
  
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA

+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
  static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
  {
u32 cfg = 0;
@@ -139,7 +149,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
  
  	return cfg;

  }
+#endif
  
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA

  static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
  {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +237,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
  }
  #endif
  
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER

+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
  static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
  {
u32 cfg = 0;
@@ -271,7 +283,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
  
  	return cfg;

  }
+#endif
  
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER

  static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
  {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +400,139 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
  }
  #endif
  
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD

+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+   __be32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 authiv[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+   __be32 authnonce[QCE_MAX_NONCE / sizeof(__be32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg = 0, auth_cfg = 0, config, totallen;


I don't see any reason to initialize encr_cfg or auth_cfg.


+   u32 *iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key */
+   qce_cpu_to_be32p_array(enckey, ctx->enc_key, enc_keylen);
+   enckey_words = enc_keylen / sizeof(u32);
+   qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);


Afaict all "array registers" in this function are affected by the
CRYPTO_SETUP little endian bit, but you set this bit before launching
the operation dependent on IS_CCM(). So is this really working for the
!IS_CCM() case?


+
+   /* Write encryption iv */
+   qce_cpu_to_be32p_array(enciv, rctx->iv, enc_ivsize);
+   enciv_words = enc_ivsize / sizeof(u32);
+   qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);


It would be nice if this snippet was extracted to a helper function.


I kind of forgot to type this earlier. So yes, I agree in principle.
It is more elegant to have something like qce_convert_be32_and_write
and do the above three steps inside that function. This snippet is prevalent
in this driver code across other algos as well (skcipher and hash).

Take it up as a separate cleanup activity?
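
For illustration, something along these lines is what I have in mind (rough
sketch only; the exact signature and scratch-buffer sizing would need more
thought, since the call sites convert keys, IVs and nonces of different
maximum sizes):

	static void qce_convert_be32_and_write(struct qce_device *qce, u32 offset,
					       const u8 *src, unsigned int len)
	{
		/* scratch buffer sized for the largest caller; illustrative only */
		__be32 buf[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};

		qce_cpu_to_be32p_array(buf, src, len);
		qce_write_array(qce, offset, (u32 *)buf, len / sizeof(u32));
	}

Each call site above would then collapse to a single call, e.g.
qce_convert_be32_and_write(qce, REG_ENCR_KEY0, ctx->enc_key, enc_keylen).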

--
Warm Regards
Thara


Re: [PATCH 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-04-13 Thread Thara Gopinath



Hi Bjorn,

On 4/5/21 6:18 PM, Bjorn Andersson wrote:

On Thu 25 Feb 12:27 CST 2021, Thara Gopinath wrote:


Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---
  drivers/crypto/qce/common.c | 155 +++-
  1 file changed, 153 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 05a71c5ecf61..54d209cb0525 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,16 @@
  #include "core.h"
  #include "regs-v5.h"
  #include "sha.h"
+#include "aead.h"
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
  
  static inline u32 qce_read(struct qce_device *qce, u32 offset)

  {
@@ -96,7 +106,7 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
  }
  
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA

+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
  static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
  {
u32 cfg = 0;
@@ -139,7 +149,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
  
  	return cfg;

  }
+#endif
  
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA

  static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
  {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +237,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
  }
  #endif
  
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER

+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
  static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
  {
u32 cfg = 0;
@@ -271,7 +283,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
  
  	return cfg;

  }
+#endif
  
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER

  static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
  {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +400,139 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
  }
  #endif
  
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD

+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+   __be32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 authiv[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+   __be32 authnonce[QCE_MAX_NONCE / sizeof(__be32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg = 0, auth_cfg = 0, config, totallen;


I don't see any reason to initialize encr_cfg or auth_cfg.


right.. I will remove it




+   u32 *iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key */
+   qce_cpu_to_be32p_array(enckey, ctx->enc_key, enc_keylen);
+   enckey_words = enc_keylen / sizeof(u32);
+   qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);


Afaict all "array registers" in this function are affected by the
CRYPTO_SETUP little endian bit, but you set this bit before launching
the operation dependent on IS_CCM(). So is this really working for the
!IS_CCM() case?


I am not sure I understand you. Below,
/* get little endianness */
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);

is outside of any checks.




+
+   /* Write encryption iv */
+   qce_cpu_to_be32p_array(enciv, rctx->iv, enc_ivsize);
+   enciv_words = enc_ivsize / sizeof(u32);
+   qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);


It would be nice if this snippet was extracted to a helper function.


+
+   if (IS_CCM(rctx->flags)) {
+   iv_last_word = (u32 *)[enciv_words - 1];
+// qce_write(qce, REG_CNTR3_IV3, enciv[enciv_words - 1] + 1);


I believe this is a remnant of the two surrounding lines.


It i

Re: [PATCH 3/7] crypto: qce: Add mode for rfc4309

2021-04-13 Thread Thara Gopinath




On 4/5/21 6:32 PM, Bjorn Andersson wrote:

On Thu 25 Feb 12:27 CST 2021, Thara Gopinath wrote:


rf4309 is the specification that uses aes ccm algorithms with IPsec
security packets. Add a submode to identify rfc4309 ccm(aes) algorithm
in the crypto driver.

Signed-off-by: Thara Gopinath 
---
  drivers/crypto/qce/common.h | 7 +--
  1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 3bc244bcca2d..3ffe719b79e4 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -51,9 +51,11 @@
  #define QCE_MODE_CCM  BIT(12)
  #define QCE_MODE_MASK GENMASK(12, 8)
  
+#define QCE_MODE_CCM_RFC4309		BIT(13)

+
  /* cipher encryption/decryption operations */
-#define QCE_ENCRYPTBIT(13)
-#define QCE_DECRYPTBIT(14)
+#define QCE_ENCRYPTBIT(14)
+#define QCE_DECRYPTBIT(15)


Can't we move these further up, so that next time we want to add
something it doesn't require that we also move the ENC/DEC bits?


Yes I will change it to BIT(30) and BIT(31)
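
That is, presumably something like this in the next revision (sketch only,
relative to the defines in this patch):

	-#define QCE_ENCRYPT			BIT(14)
	-#define QCE_DECRYPT			BIT(15)
	+#define QCE_ENCRYPT			BIT(30)
	+#define QCE_DECRYPT			BIT(31)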



  
  #define IS_DES(flags)			(flags & QCE_ALG_DES)

  #define IS_3DES(flags)(flags & QCE_ALG_3DES)
@@ -73,6 +75,7 @@
  #define IS_CTR(mode)  (mode & QCE_MODE_CTR)
  #define IS_XTS(mode)  (mode & QCE_MODE_XTS)
  #define IS_CCM(mode)  (mode & QCE_MODE_CCM)
+#define IS_CCM_RFC4309(mode)   ((mode) & QCE_MODE_CCM_RFC4309)


While leaving room for the typical macro issues, none of the other
macros wrap the argument in parentheses. Please follow the style of the
driver, and perhaps follow up with a cleanup patch that just wraps them
all in parentheses?


This does throw up a checkpatch warning if I don't wrap "mode" in
parentheses. How about I keep this for now and follow up with a
cleanup for the rest of the macros later?
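
For illustration, that follow-up cleanup would just be wrapping the argument
in each of the existing helpers, along these lines (sketch, not a posted
patch):

	-#define IS_CCM(mode)			(mode & QCE_MODE_CCM)
	+#define IS_CCM(mode)			((mode) & QCE_MODE_CCM)
	-#define IS_ENCRYPT(dir)			(dir & QCE_ENCRYPT)
	+#define IS_ENCRYPT(dir)			((dir) & QCE_ENCRYPT)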




Regards,
Bjorn

  
  #define IS_ENCRYPT(dir)			(dir & QCE_ENCRYPT)

  #define IS_DECRYPT(dir)   (dir & QCE_DECRYPT)
--
2.25.1



--
Warm Regards
Thara


Re: [PATCH 1/7] crypto: qce: common: Add MAC failed error checking

2021-04-13 Thread Thara Gopinath

Hi Bjorn,

Thanks for the reviews.
I realized that I had these replies in my draft for a while and forgot 
to send them!


On 4/5/21 1:36 PM, Bjorn Andersson wrote:

On Thu 25 Feb 12:27 CST 2021, Thara Gopinath wrote:


MAC_FAILED gets set in the status register if authenthication fails
for ccm algorithms(during decryption). Add support to catch and flag
this error.

Signed-off-by: Thara Gopinath 
---
  drivers/crypto/qce/common.c | 11 ---
  1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dceb9579d87a..7c3cb483749e 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -403,7 +403,8 @@ int qce_start(struct crypto_async_request *async_req, u32 
type)
  }
  
  #define STATUS_ERRORS	\

-   (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+   (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) |   \
+BIT(HSD_ERR_SHIFT) | BIT(MAC_FAILED_SHIFT))
  
  int qce_check_status(struct qce_device *qce, u32 *status)

  {
@@ -417,8 +418,12 @@ int qce_check_status(struct qce_device *qce, u32 *status)
 * use result_status from result dump the result_status needs to be byte
 * swapped, since we set the device to little endian.
 */
-   if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
-   ret = -ENXIO;
+   if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) {
+   if (*status & BIT(MAC_FAILED_SHIFT))


Afaict MAC_FAILED indicates a different category of errors from the
others. So I would prefer that the conditionals are flattened.

Is OPERATION_DONE set when MAC_FAILED?

Yes it is. I will change the check to the pattern you have suggested. It
is less confusing.




If so:

if (errors || !done)
return -ENXIO;
else if (*status & BIT(MAC_FAILED))
return -EBADMSG;

Would be cleaner in my opinion.

Regards,
Bjorn


+   ret = -EBADMSG;
+   else
+   ret = -ENXIO;
+   }
  
  	return ret;

  }
--
2.25.1



--
Warm Regards
Thara


Re: [RESEND PATCH v4 2/2] thermal: qcom: tsens-v0_1: Add support for MDM9607

2021-03-21 Thread Thara Gopinath




On 3/19/21 6:08 PM, Konrad Dybcio wrote:

MDM9607 TSENS IP is very similar to the one of MSM8916, with
minor adjustments to various tuning values.

Signed-off-by: Konrad Dybcio 


Acked-by: Thara Gopinath 

Warm Regards
Thara

---
v4: Remove unneeded braces and newline

  drivers/thermal/qcom/tsens-v0_1.c | 98 ++-
  drivers/thermal/qcom/tsens.c  |  3 +
  drivers/thermal/qcom/tsens.h  |  2 +-
  3 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-v0_1.c 
b/drivers/thermal/qcom/tsens-v0_1.c
index 4ffa2e2c0145..f136cb350238 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -190,6 +190,39 @@
  
  #define BIT_APPEND		0x3
  
+/* eeprom layout data for mdm9607 */

+#define MDM9607_BASE0_MASK 0x00ff
+#define MDM9607_BASE1_MASK 0x000ff000
+#define MDM9607_BASE0_SHIFT0
+#define MDM9607_BASE1_SHIFT12
+
+#define MDM9607_S0_P1_MASK 0x3f00
+#define MDM9607_S1_P1_MASK 0x03f0
+#define MDM9607_S2_P1_MASK 0x003f
+#define MDM9607_S3_P1_MASK 0x0003f000
+#define MDM9607_S4_P1_MASK 0x003f
+
+#define MDM9607_S0_P2_MASK 0x000fc000
+#define MDM9607_S1_P2_MASK 0xfc00
+#define MDM9607_S2_P2_MASK 0x0fc0
+#define MDM9607_S3_P2_MASK 0x00fc
+#define MDM9607_S4_P2_MASK 0x0fc0
+
+#define MDM9607_S0_P1_SHIFT8
+#define MDM9607_S1_P1_SHIFT20
+#define MDM9607_S2_P1_SHIFT0
+#define MDM9607_S3_P1_SHIFT12
+#define MDM9607_S4_P1_SHIFT0
+
+#define MDM9607_S0_P2_SHIFT14
+#define MDM9607_S1_P2_SHIFT26
+#define MDM9607_S2_P2_SHIFT6
+#define MDM9607_S3_P2_SHIFT18
+#define MDM9607_S4_P2_SHIFT6
+
+#define MDM9607_CAL_SEL_MASK   0x0070
+#define MDM9607_CAL_SEL_SHIFT  20
+
  static int calibrate_8916(struct tsens_priv *priv)
  {
int base0 = 0, base1 = 0, i;
@@ -452,7 +485,56 @@ static int calibrate_8974(struct tsens_priv *priv)
return 0;
  }
  
-/* v0.1: 8916, 8939, 8974 */

+static int calibrate_9607(struct tsens_priv *priv)
+{
+   int base, i;
+   u32 p1[5], p2[5];
+   int mode = 0;
+   u32 *qfprom_cdata;
+
+   qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+   if (IS_ERR(qfprom_cdata))
+   return PTR_ERR(qfprom_cdata);
+
+   mode = (qfprom_cdata[2] & MDM9607_CAL_SEL_MASK) >> 
MDM9607_CAL_SEL_SHIFT;
+   dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+   switch (mode) {
+   case TWO_PT_CALIB:
+   base = (qfprom_cdata[2] & MDM9607_BASE1_MASK) >> 
MDM9607_BASE1_SHIFT;
+   p2[0] = (qfprom_cdata[0] & MDM9607_S0_P2_MASK) >> 
MDM9607_S0_P2_SHIFT;
+   p2[1] = (qfprom_cdata[0] & MDM9607_S1_P2_MASK) >> 
MDM9607_S1_P2_SHIFT;
+   p2[2] = (qfprom_cdata[1] & MDM9607_S2_P2_MASK) >> 
MDM9607_S2_P2_SHIFT;
+   p2[3] = (qfprom_cdata[1] & MDM9607_S3_P2_MASK) >> 
MDM9607_S3_P2_SHIFT;
+   p2[4] = (qfprom_cdata[2] & MDM9607_S4_P2_MASK) >> 
MDM9607_S4_P2_SHIFT;
+   for (i = 0; i < priv->num_sensors; i++)
+   p2[i] = ((base + p2[i]) << 2);
+   fallthrough;
+   case ONE_PT_CALIB2:
+   base = (qfprom_cdata[0] & MDM9607_BASE0_MASK);
+   p1[0] = (qfprom_cdata[0] & MDM9607_S0_P1_MASK) >> 
MDM9607_S0_P1_SHIFT;
+   p1[1] = (qfprom_cdata[0] & MDM9607_S1_P1_MASK) >> 
MDM9607_S1_P1_SHIFT;
+   p1[2] = (qfprom_cdata[1] & MDM9607_S2_P1_MASK) >> 
MDM9607_S2_P1_SHIFT;
+   p1[3] = (qfprom_cdata[1] & MDM9607_S3_P1_MASK) >> 
MDM9607_S3_P1_SHIFT;
+   p1[4] = (qfprom_cdata[2] & MDM9607_S4_P1_MASK) >> 
MDM9607_S4_P1_SHIFT;
+   for (i = 0; i < priv->num_sensors; i++)
+   p1[i] = ((base + p1[i]) << 2);
+   break;
+   default:
+   for (i = 0; i < priv->num_sensors; i++) {
+   p1[i] = 500;
+   p2[i] = 780;
+   }
+   break;
+   }
+
+   compute_intercept_slope(priv, p1, p2, mode);
+   kfree(qfprom_cdata);
+
+   return 0;
+}
+
+/* v0.1: 8916, 8939, 8974, 9607 */
  
  static struct tsens_features tsens_v0_1_feat = {

.ver_major  = VER_0_1,
@@ -540,3 +622,17 @@ struct tsens_plat_data data_8974 = {
.feat   = _v0_1_feat,
.fields = tsens_v0_1_regfields,
  };
+
+static const struct tsens_ops ops_9607 = {
+   .init   = init_common,
+   .calibrate  = calibrate_9607,
+   .get_temp   = get_temp_common,
+};
+
+struct tsens_plat_data data_9607 = {
+   .num_sensors= 5,
+   .ops= _9607,
+   .hw_ids = (unsigned int []){ 0, 1, 2, 3, 4 },
+   .feat   = _v0_1_feat,
+   .fi

Re: [PATCH v12 5/9] drivers: thermal: tsens: Fix bug in sensor enable for msm8960

2021-03-21 Thread Thara Gopinath




On 3/19/21 2:15 PM, Ansuel Smith wrote:

Device based on tsens VER_0 contains a hardware bug that results in some
problem with sensor enablement. Sensor id 6-11 can't be enabled
selectively and all of them must be enabled in one step.

Signed-off-by: Ansuel Smith 


Acked-by: Thara Gopinath 

Warm Regards
Thara

---
  drivers/thermal/qcom/tsens-8960.c | 23 ---
  1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 86585f439985..bf8dfaf06428 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -27,9 +27,9 @@
  #define ENBIT(0)
  #define SW_RSTBIT(1)
  #define SENSOR0_ENBIT(3)
+#define MEASURE_PERIOD BIT(18)
  #define SLP_CLK_ENA   BIT(26)
  #define SLP_CLK_ENA_8660  BIT(24)
-#define MEASURE_PERIOD 1
  #define SENSOR0_SHIFT 3
  
  /* INT_STATUS_ADDR bitmasks */

@@ -126,17 +126,34 @@ static int resume_8960(struct tsens_priv *priv)
  static int enable_8960(struct tsens_priv *priv, int id)
  {
int ret;
-   u32 reg, mask;
+   u32 reg, mask = BIT(id);
  
  	ret = regmap_read(priv->tm_map, CNTL_ADDR, );

if (ret)
return ret;
  
-	mask = BIT(id + SENSOR0_SHIFT);

+   /* HARDWARE BUG:
+* On platform with more than 6 sensors, all the remaining
+* sensors needs to be enabled all togheder or underfined
+* results are expected. (Sensor 6-7 disabled, Sensor 3
+* disabled...) In the original driver, all the sensors
+* are enabled in one step hence this bug is not triggered.
+*/
+   if (id > 5)
+   mask = GENMASK(10, 6);
+
+   mask <<= SENSOR0_SHIFT;
+
+   /* Sensors already enabled. Skip. */
+   if ((reg & mask) == mask)
+   return 0;
+
ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
  
+	reg |= MEASURE_PERIOD;

+
if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else





[PATCH] MAINTAINERS: Add co-maintainer for Qualcomm tsens thermal drivers

2021-03-19 Thread Thara Gopinath
Add myself as the maintainer for Qualcomm tsens drivers so that I
can help Daniel by taking care of/reviewing changes to these drivers.

Signed-off-by: Thara Gopinath 
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index aa84121c5611..ab66ab9a628e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14892,6 +14892,7 @@ F:  include/linux/if_rmnet.h
 
 QUALCOMM TSENS THERMAL DRIVER
 M: Amit Kucheria 
+M: Thara Gopinath 
 L: linux...@vger.kernel.org
 L: linux-arm-...@vger.kernel.org
 S: Maintained
-- 
2.25.1



Re: [PATCH v11 1/9] drivers: thermal: tsens: Add VER_0 tsens version

2021-03-19 Thread Thara Gopinath




On 3/19/21 9:28 AM, Ansuel Smith wrote:

On Fri, Mar 19, 2021 at 09:11:38AM -0400, Thara Gopinath wrote:



On 3/18/21 8:52 PM, Ansuel Smith wrote:

VER_0 is used to describe device based on tsens version before v0.1.
These device are devices based on msm8960 for example apq8064 or
ipq806x.


Hi Ansuel,

There are still checkpatch check warnings in this patch. Please run
checkpatch.pl --strict and fix them. Once that is done, you can add

Reviewed-by: Thara Gopinath 

Warm Regards
Thara



Hi,
thanks a lot for the review. The only warning I have is a line ending
with ( that I think I can't fix without going over the max chars per line.
Do you have anything more?


I see two warnings for lines ending with (. The max char limit is 100.

--
Warm Regards
Thara


Re: [PATCH v11 7/9] drivers: thermal: tsens: Drop unused define for msm8960

2021-03-19 Thread Thara Gopinath




On 3/18/21 8:52 PM, Ansuel Smith wrote:

Drop unused define for msm8960 replaced by generic api and reg_field.

Signed-off-by: Ansuel Smith 


Thanks for the clean up!

Reviewed-by: Thara Gopinath 

Warm Regards
Thara


---
  drivers/thermal/qcom/tsens-8960.c | 24 +---
  1 file changed, 1 insertion(+), 23 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 8c523b764862..31e44d17d484 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -10,8 +10,6 @@
  #include 
  #include "tsens.h"
  
-#define CAL_MDEGC		3

-
  #define CONFIG_ADDR   0x3640
  #define CONFIG_ADDR_8660  0x3620
  /* CONFIG_ADDR bitmasks */
@@ -21,39 +19,19 @@
  #define CONFIG_SHIFT_8660 28
  #define CONFIG_MASK_8660  (3 << CONFIG_SHIFT_8660)
  
-#define STATUS_CNTL_ADDR_8064	0x3660

  #define CNTL_ADDR 0x3620
  /* CNTL_ADDR bitmasks */
  #define ENBIT(0)
  #define SW_RSTBIT(1)
-#define SENSOR0_EN BIT(3)
+
  #define MEASURE_PERIODBIT(18)
  #define SLP_CLK_ENA   BIT(26)
  #define SLP_CLK_ENA_8660  BIT(24)
  #define SENSOR0_SHIFT 3
  
-/* INT_STATUS_ADDR bitmasks */

-#define MIN_STATUS_MASKBIT(0)
-#define LOWER_STATUS_CLR   BIT(1)
-#define UPPER_STATUS_CLR   BIT(2)
-#define MAX_STATUS_MASKBIT(3)
-
  #define THRESHOLD_ADDR0x3624
-/* THRESHOLD_ADDR bitmasks */
-#define THRESHOLD_MAX_LIMIT_SHIFT  24
-#define THRESHOLD_MIN_LIMIT_SHIFT  16
-#define THRESHOLD_UPPER_LIMIT_SHIFT8
-#define THRESHOLD_LOWER_LIMIT_SHIFT0
-
-/* Initial temperature threshold values */
-#define LOWER_LIMIT_TH 0x50
-#define UPPER_LIMIT_TH 0xdf
-#define MIN_LIMIT_TH   0x0
-#define MAX_LIMIT_TH   0xff
  
  #define INT_STATUS_ADDR		0x363c

-#define TRDY_MASK  BIT(7)
-#define TIMEOUT_US 100
  
  #define S0_STATUS_OFF		0x3628

  #define S1_STATUS_OFF 0x362c



--
Warm Regards
Thara


Re: [PATCH v11 6/9] drivers: thermal: tsens: Replace custom 8960 apis with generic apis

2021-03-19 Thread Thara Gopinath




On 3/18/21 8:52 PM, Ansuel Smith wrote:

Rework calibrate function to use common function. Derive the offset from
a missing hardcoded slope table and the data from the nvmem calib
efuses.
Drop custom get_temp function and use generic api.

Signed-off-by: Ansuel Smith 
Acked-by: Thara Gopinath 
---
  drivers/thermal/qcom/tsens-8960.c | 56 +--
  1 file changed, 15 insertions(+), 41 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index bdc64d4188bf..8c523b764862 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -67,6 +67,13 @@
  #define S9_STATUS_OFF 0x3674
  #define S10_STATUS_OFF0x3678
  
+/* Original slope - 200 to compensate mC to C inaccuracy */

+u32 tsens_msm8960_slope[] = {
+   976, 976, 954, 976,
+   911, 932, 932, 999,
+   932, 999, 932
+   };


make C=1 throws a warning for this. You have to make the table static.
You can keep my Acked-by once it is fixed.
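
That is, just the one-line change (sketch):

	-u32 tsens_msm8960_slope[] = {
	+static u32 tsens_msm8960_slope[] = {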


Warm Regards
Thara


Re: [PATCH v11 5/9] drivers: thermal: tsens: Fix bug in sensor enable for msm8960

2021-03-19 Thread Thara Gopinath

Hi!

On 3/18/21 8:52 PM, Ansuel Smith wrote:

Device based on tsens VER_0 contains a hardware bug that results in some
problem with sensor enablement. Sensor id 6-11 can't be enabled
selectively and all of them must be enabled in one step.


Thanks for rewording!



Signed-off-by: Ansuel Smith 
---
  drivers/thermal/qcom/tsens-8960.c | 24 +---
  1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 86585f439985..bdc64d4188bf 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -27,9 +27,9 @@
  #define ENBIT(0)
  #define SW_RSTBIT(1)
  #define SENSOR0_ENBIT(3)
+#define MEASURE_PERIOD BIT(18)
  #define SLP_CLK_ENA   BIT(26)
  #define SLP_CLK_ENA_8660  BIT(24)
-#define MEASURE_PERIOD 1
  #define SENSOR0_SHIFT 3
  
  /* INT_STATUS_ADDR bitmasks */

@@ -126,17 +126,35 @@ static int resume_8960(struct tsens_priv *priv)
  static int enable_8960(struct tsens_priv *priv, int id)
  {
int ret;
-   u32 reg, mask;
+   u32 reg, mask = BIT(id);
  
  	ret = regmap_read(priv->tm_map, CNTL_ADDR, );

if (ret)
return ret;
  
-	mask = BIT(id + SENSOR0_SHIFT);

+   /* HARDWARE BUG:
+* On platform with more than 6 sensors, all the remaining
+* sensors needs to be enabled all togheder or underfined
+* results are expected. (Sensor 6-7 disabled, Sensor 3
+* disabled...) In the original driver, all the sensors
+* are enabled in one step hence this bug is not triggered.
+*/
+   if (id > 5) {
+   mask = GENMASK(10, 6);
+
+   /* Sensors already enabled. Skip. */
+   if ((reg & mask) == mask)


This is a bug. You have to do mask <<= SENSOR0_SHIFT; before reg & mask.


+   return 0;
+   }
+
+   mask <<= SENSOR0_SHIFT;
+
ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
  
+	reg |= MEASURE_PERIOD;

+
if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else



--
Warm Regards
Thara


Re: [PATCH v11 1/9] drivers: thermal: tsens: Add VER_0 tsens version

2021-03-19 Thread Thara Gopinath




On 3/18/21 8:52 PM, Ansuel Smith wrote:

VER_0 is used to describe device based on tsens version before v0.1.
These device are devices based on msm8960 for example apq8064 or
ipq806x.


Hi Ansuel,

There are still checkpatch check warnings in this patch. Please run 
checkpatch.pl --strict and fix them. Once that is done, you can add


Reviewed-by: Thara Gopinath 

Warm Regards
Thara



Signed-off-by: Ansuel Smith 
---
  drivers/thermal/qcom/tsens.c | 141 ---
  drivers/thermal/qcom/tsens.h |   4 +-
  2 files changed, 116 insertions(+), 29 deletions(-)

diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index d8ce3a687b80..277d9b17e949 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -12,6 +12,7 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
@@ -515,6 +516,15 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
dev_dbg(priv->dev, "[%u] %s: no violation:  %d\n",
hw_id, __func__, temp);
}
+
+   if (tsens_version(priv) < VER_0_1) {
+   /* Constraint: There is only 1 interrupt control 
register for all
+* 11 temperature sensor. So monitoring more than 1 
sensor based
+* on interrupts will yield inconsistent result. To 
overcome this
+* issue we will monitor only sensor 0 which is the 
master sensor.
+*/
+   break;
+   }
}
  
  	return IRQ_HANDLED;

@@ -530,6 +540,13 @@ static int tsens_set_trips(void *_sensor, int low, int 
high)
int high_val, low_val, cl_high, cl_low;
u32 hw_id = s->hw_id;
  
+	if (tsens_version(priv) < VER_0_1) {

+   /* Pre v0.1 IP had a single register for each type of interrupt
+* and thresholds
+*/
+   hw_id = 0;
+   }
+
dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n",
hw_id, __func__, low, high);
  
@@ -584,18 +601,21 @@ int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)

u32 valid;
int ret;
  
-	ret = regmap_field_read(priv->rf[valid_idx], );

-   if (ret)
-   return ret;
-   while (!valid) {
-   /* Valid bit is 0 for 6 AHB clock cycles.
-* At 19.2MHz, 1 AHB clock is ~60ns.
-* We should enter this loop very, very rarely.
-*/
-   ndelay(400);
+   /* VER_0 doesn't have VALID bit */
+   if (tsens_version(priv) >= VER_0_1) {
ret = regmap_field_read(priv->rf[valid_idx], );
if (ret)
return ret;
+   while (!valid) {
+   /* Valid bit is 0 for 6 AHB clock cycles.
+* At 19.2MHz, 1 AHB clock is ~60ns.
+* We should enter this loop very, very rarely.
+*/
+   ndelay(400);
+   ret = regmap_field_read(priv->rf[valid_idx], );
+   if (ret)
+   return ret;
+   }
}
  
  	/* Valid bit is set, OK to read the temperature */

@@ -608,15 +628,29 @@ int get_temp_common(const struct tsens_sensor *s, int 
*temp)
  {
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
-   int last_temp = 0, ret;
+   int last_temp = 0, ret, trdy;
+   unsigned long timeout;
  
-	ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], _temp);

-   if (ret)
-   return ret;
+   timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+   do {
+   if (tsens_version(priv) == VER_0) {
+   ret = regmap_field_read(priv->rf[TRDY], );
+   if (ret)
+   return ret;
+   if (!trdy)
+   continue;
+   }
  
-	*temp = code_to_degc(last_temp, s) * 1000;

+   ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], 
_temp);
+   if (ret)
+   return ret;
  
-	return 0;

+   *temp = code_to_degc(last_temp, s) * 1000;
+
+   return 0;
+   } while (time_before(jiffies, timeout));
+
+   return -ETIMEDOUT;
  }
  
  #ifdef CONFIG_DEBUG_FS

@@ -738,19 +772,31 @@ int __init init_common(struct tsens_priv *priv)
priv->tm_offset = 0x1000;
}
  
-	res = platform_get_resource(op, IORESOURCE_MEM, 0);

-   tm_base = devm_ioremap_resource(dev, res);
-   if (IS_ERR(tm_base)) {
-   ret = PTR_ERR(tm_base);
-   goto err_put_device;
+   if (tsens_version(priv) >= VER_0_1) {
+   res = platform_get_resource(op, IORESOURCE_

Re: [PATCH v10 6/8] drivers: thermal: tsens: Use get_temp_common for msm8960

2021-03-18 Thread Thara Gopinath




On 2/17/21 2:40 PM, Ansuel Smith wrote:

Rework calibrate function to use common function. Derive the offset from
a missing hardcoded slope table and the data from the nvmem calib
efuses.


You are also changing get_temp to use get_temp_common instead of
get_temp_8960 in this patch. Please add that to the commit description as
well. I would also consider changing the subject header to something more
generic, like

"drivers: thermal: tsens: Replace custom 8960 apis with generic apis"
or anything better.

Otherwise,
Acked-by: Thara Gopinath 

Warm Regards
Thara



Signed-off-by: Ansuel Smith 
---
  drivers/thermal/qcom/tsens-8960.c | 56 +--
  1 file changed, 15 insertions(+), 41 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 248aaa65b5b0..43ebe4d54672 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -67,6 +67,13 @@
  #define S9_STATUS_OFF 0x3674
  #define S10_STATUS_OFF0x3678
  
+/* Original slope - 200 to compensate mC to C inaccuracy */

+u32 tsens_msm8960_slope[] = {
+   976, 976, 954, 976,
+   911, 932, 932, 999,
+   932, 999, 932
+   };
+
  static int suspend_8960(struct tsens_priv *priv)
  {
int ret;
@@ -192,9 +199,7 @@ static int calibrate_8960(struct tsens_priv *priv)
  {
int i;
char *data;
-
-   ssize_t num_read = priv->num_sensors;
-   struct tsens_sensor *s = priv->sensor;
+   u32 p1[11];
  
  	data = qfprom_read(priv->dev, "calib");

if (IS_ERR(data))
@@ -202,49 +207,18 @@ static int calibrate_8960(struct tsens_priv *priv)
if (IS_ERR(data))
return PTR_ERR(data);
  
-	for (i = 0; i < num_read; i++, s++)

-   s->offset = data[i];
+   for (i = 0; i < priv->num_sensors; i++) {
+   p1[i] = data[i];
+   priv->sensor[i].slope = tsens_msm8960_slope[i];
+   }
+
+   compute_intercept_slope(priv, p1, NULL, ONE_PT_CALIB);
  
  	kfree(data);
  
  	return 0;

  }
  
-/* Temperature on y axis and ADC-code on x-axis */

-static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
-{
-   int slope, offset;
-
-   slope = thermal_zone_get_slope(s->tzd);
-   offset = CAL_MDEGC - slope * s->offset;
-
-   return adc_code * slope + offset;
-}
-
-static int get_temp_8960(const struct tsens_sensor *s, int *temp)
-{
-   int ret;
-   u32 code, trdy;
-   struct tsens_priv *priv = s->priv;
-   unsigned long timeout;
-
-   timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
-   do {
-   ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, );
-   if (ret)
-   return ret;
-   if (!(trdy & TRDY_MASK))
-   continue;
-   ret = regmap_read(priv->tm_map, s->status, );
-   if (ret)
-   return ret;
-   *temp = code_to_mdegC(code, s);
-   return 0;
-   } while (time_before(jiffies, timeout));
-
-   return -ETIMEDOUT;
-}
-
  static struct tsens_features tsens_8960_feat = {
.ver_major  = VER_0,
.crit_int   = 0,
@@ -313,7 +287,7 @@ static const struct reg_field 
tsens_8960_regfields[MAX_REGFIELDS] = {
  static const struct tsens_ops ops_8960 = {
.init   = init_common,
.calibrate  = calibrate_8960,
-   .get_temp   = get_temp_8960,
+   .get_temp   = get_temp_common,
.enable = enable_8960,
.disable= disable_8960,
.suspend= suspend_8960,



Re: [PATCH v10 5/8] drivers: thermal: tsens: Fix bug in sensor enable for msm8960

2021-03-18 Thread Thara Gopinath




On 2/17/21 2:40 PM, Ansuel Smith wrote:

It's present a hardware bug in tsens VER_0 where if sensors upper to id
6 are enabled selectively, underfined results are expected. Fix this by
enabling all the remaining sensor in one step.



It took me a while to understand this. It is most likely me! But please 
consider rewording.




Signed-off-by: Ansuel Smith 
---
  drivers/thermal/qcom/tsens-8960.c | 19 +--
  1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 86585f439985..248aaa65b5b0 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -27,9 +27,9 @@
  #define ENBIT(0)
  #define SW_RSTBIT(1)
  #define SENSOR0_ENBIT(3)
+#define MEASURE_PERIOD BIT(18)
  #define SLP_CLK_ENA   BIT(26)
  #define SLP_CLK_ENA_8660  BIT(24)
-#define MEASURE_PERIOD 1
  #define SENSOR0_SHIFT 3
  
  /* INT_STATUS_ADDR bitmasks */

@@ -132,11 +132,26 @@ static int enable_8960(struct tsens_priv *priv, int id)
if (ret)
return ret;
  
-	mask = BIT(id + SENSOR0_SHIFT);

+   /* HARDWARE BUG:
+* On platform with more than 5 sensors, all the remaining


Isn't it 6? At least according to the code below it is; you are checking
for id > 5.



+* sensors needs to be enabled all togheder or underfined
+* results are expected. (Sensor 6-7 disabled, Sensor 3
+* disabled...) In the original driver, all the sensors
+* are enabled in one step hence this bug is not triggered.


Also, with this change you should add a check in this function to see if
the sensors are already enabled and, if so, return early. The enable
call from tsens.c happens for every single sensor, but at sensor number
6 you are enabling all of the remaining sensors. There is no reason to
keep doing this for the rest of the sensors; a sketch of what I mean follows.
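
Something like this, after mask has been shifted by SENSOR0_SHIFT (sketch;
the later revisions of this patch add essentially this check):

	/* Sensors already enabled. Skip. */
	if ((reg & mask) == mask)
		return 0;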



+*/
+   if (id > 5)
+   mask = GENMASK(10, 6);
+   else
+   mask = BIT(id);
+
+   mask <<= SENSOR0_SHIFT;
+
ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);


I know this is not part of this patch, but you mention above that
earlier you were enabling all sensors in one shot. Now that this is being
done one at a time, is it necessary to do a SW_RST every time?



if (ret)
return ret;
  
+	reg |= MEASURE_PERIOD;

+
if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else



--
Warm Regards
Thara


Re: [PATCH v10 3/8] drivers: thermal: tsens: Convert msm8960 to reg_field

2021-03-18 Thread Thara Gopinath




On 2/17/21 2:40 PM, Ansuel Smith wrote:

Convert msm9860 driver to reg_field to use the init_common
function.


Hi!

Now that you have done this, you should clean up the unused
bitmasks/offsets etc. in the tsens-8960.c file as well, as a separate
patch. I only see the need to keep SLP_CLK_ENA_8660 and SLP_CLK_ENA.
Everything else can be removed and the software can use
priv->rf[_field_] for access; see the sketch below.
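
For example, once the regfields are wired up, the accesses can go through
the regmap_field helpers instead of the raw defines (rough sketch):

	/* instead of regmap_read(CNTL_ADDR) plus an open-coded SW_RST bit */
	ret = regmap_field_write(priv->rf[TSENS_SW_RST], 1);

	/* instead of reading INT_STATUS_ADDR and masking with TRDY_MASK */
	ret = regmap_field_read(priv->rf[TRDY], &trdy);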

Otherwise for this patch

Acked-by: Thara Gopinath 

Warm Regards
Thara


Signed-off-by: Ansuel Smith 
---
  drivers/thermal/qcom/tsens-8960.c | 80 ++-
  1 file changed, 79 insertions(+), 1 deletion(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 2a28a5af209e..3f4fc1ffe679 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -51,11 +51,22 @@
  #define MIN_LIMIT_TH  0x0
  #define MAX_LIMIT_TH  0xff
  
-#define S0_STATUS_ADDR		0x3628

  #define INT_STATUS_ADDR   0x363c
  #define TRDY_MASK BIT(7)
  #define TIMEOUT_US100
  
+#define S0_STATUS_OFF		0x3628

+#define S1_STATUS_OFF  0x362c
+#define S2_STATUS_OFF  0x3630
+#define S3_STATUS_OFF  0x3634
+#define S4_STATUS_OFF  0x3638
+#define S5_STATUS_OFF  0x3664  /* Sensors 5-10 found on 
apq8064/msm8960 */
+#define S6_STATUS_OFF  0x3668
+#define S7_STATUS_OFF  0x366c
+#define S8_STATUS_OFF  0x3670
+#define S9_STATUS_OFF  0x3674
+#define S10_STATUS_OFF 0x3678
+
  static int suspend_8960(struct tsens_priv *priv)
  {
int ret;
@@ -269,6 +280,71 @@ static int get_temp_8960(const struct tsens_sensor *s, int 
*temp)
return -ETIMEDOUT;
  }
  
+static struct tsens_features tsens_8960_feat = {

+   .ver_major  = VER_0,
+   .crit_int   = 0,
+   .adc= 1,
+   .srot_split = 0,
+   .max_sensors= 11,
+};
+
+static const struct reg_field tsens_8960_regfields[MAX_REGFIELDS] = {
+   /* - SROT -- */
+   /* No VERSION information */
+
+   /* CNTL */
+   [TSENS_EN] = REG_FIELD(CNTL_ADDR,  0, 0),
+   [TSENS_SW_RST] = REG_FIELD(CNTL_ADDR,  1, 1),
+   /* 8960 has 5 sensors, 8660 has 11, we only handle 5 */
+   [SENSOR_EN]= REG_FIELD(CNTL_ADDR,  3, 7),
+
+   /* - TM -- */
+   /* INTERRUPT ENABLE */
+   /* NO INTERRUPT ENABLE */
+
+   /* Single UPPER/LOWER TEMPERATURE THRESHOLD for all sensors */
+   [LOW_THRESH_0]   = REG_FIELD(THRESHOLD_ADDR,  0,  7),
+   [UP_THRESH_0]= REG_FIELD(THRESHOLD_ADDR,  8, 15),
+   /* MIN_THRESH_0 and MAX_THRESH_0 are not present in the regfield
+* Recycle CRIT_THRESH_0 and 1 to set the required regs to hardcoded 
temp
+* MIN_THRESH_0 -> CRIT_THRESH_1
+* MAX_THRESH_0 -> CRIT_THRESH_0
+*/
+   [CRIT_THRESH_1]   = REG_FIELD(THRESHOLD_ADDR, 16, 23),
+   [CRIT_THRESH_0]   = REG_FIELD(THRESHOLD_ADDR, 24, 31),
+
+   /* UPPER/LOWER INTERRUPT [CLEAR/STATUS] */
+   /* 1 == clear, 0 == normal operation */
+   [LOW_INT_CLEAR_0]   = REG_FIELD(CNTL_ADDR,  9,  9),
+   [UP_INT_CLEAR_0]= REG_FIELD(CNTL_ADDR, 10, 10),
+
+   /* NO CRITICAL INTERRUPT SUPPORT on 8960 */
+
+   /* Sn_STATUS */
+   [LAST_TEMP_0]  = REG_FIELD(S0_STATUS_OFF,  0,  7),
+   [LAST_TEMP_1]  = REG_FIELD(S1_STATUS_OFF,  0,  7),
+   [LAST_TEMP_2]  = REG_FIELD(S2_STATUS_OFF,  0,  7),
+   [LAST_TEMP_3]  = REG_FIELD(S3_STATUS_OFF,  0,  7),
+   [LAST_TEMP_4]  = REG_FIELD(S4_STATUS_OFF,  0,  7),
+   [LAST_TEMP_5]  = REG_FIELD(S5_STATUS_OFF,  0,  7),
+   [LAST_TEMP_6]  = REG_FIELD(S6_STATUS_OFF,  0,  7),
+   [LAST_TEMP_7]  = REG_FIELD(S7_STATUS_OFF,  0,  7),
+   [LAST_TEMP_8]  = REG_FIELD(S8_STATUS_OFF,  0,  7),
+   [LAST_TEMP_9]  = REG_FIELD(S9_STATUS_OFF,  0,  7),
+   [LAST_TEMP_10] = REG_FIELD(S10_STATUS_OFF, 0,  7),
+
+   /* No VALID field on 8960 */
+   /* TSENS_INT_STATUS bits: 1 == threshold violated */
+   [MIN_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 0, 0),
+   [LOWER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 1, 1),
+   [UPPER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 2, 2),
+   /* No CRITICAL field on 8960 */
+   [MAX_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 3, 3),
+
+   /* TRDY: 1=ready, 0=in progress */
+   [TRDY] = REG_FIELD(INT_STATUS_ADDR, 7, 7),
+};
+
  static const struct tsens_ops ops_8960 = {
.init   = init_8960,
.calibrate  = calibrate_8960,
@@ -282,4 +358,6 @@ static const struct tsens_ops ops_8960 = {
  struct tsens_plat_data data_8960 = {
.num_sensors= 11,
.ops= _8960,
+   .feat   = _8960_feat,
+   .fields = tsens_8960_regfields,
  };





Re: [PATCH v10 4/8] drivers: thermal: tsens: Use init_common for msm8960

2021-03-18 Thread Thara Gopinath




On 2/17/21 2:40 PM, Ansuel Smith wrote:

Use init_common and drop custom init for msm8960.

Signed-off-by: Ansuel Smith 


Reviewed-by: Thara Gopinath 

Warm Regards
Thara


--- >   drivers/thermal/qcom/tsens-8960.c | 52 +--
  1 file changed, 1 insertion(+), 51 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-8960.c 
b/drivers/thermal/qcom/tsens-8960.c
index 3f4fc1ffe679..86585f439985 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -173,56 +173,6 @@ static void disable_8960(struct tsens_priv *priv)
regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
  }
  
-static int init_8960(struct tsens_priv *priv)

-{
-   int ret, i;
-   u32 reg_cntl;
-
-   priv->tm_map = dev_get_regmap(priv->dev, NULL);
-   if (!priv->tm_map)
-   return -ENODEV;
-
-   /*
-* The status registers for each sensor are discontiguous
-* because some SoCs have 5 sensors while others have more
-* but the control registers stay in the same place, i.e
-* directly after the first 5 status registers.
-*/
-   for (i = 0; i < priv->num_sensors; i++) {
-   if (i >= 5)
-   priv->sensor[i].status = S0_STATUS_ADDR + 40;
-   priv->sensor[i].status += i * 4;
-   }
-
-   reg_cntl = SW_RST;
-   ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
-   if (ret)
-   return ret;
-
-   if (priv->num_sensors > 1) {
-   reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
-   reg_cntl &= ~SW_RST;
-   ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR,
-CONFIG_MASK, CONFIG);
-   } else {
-   reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
-   reg_cntl &= ~CONFIG_MASK_8660;
-   reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
-   }
-
-   reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT;
-   ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
-   if (ret)
-   return ret;
-
-   reg_cntl |= EN;
-   ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
-   if (ret)
-   return ret;
-
-   return 0;
-}
-
  static int calibrate_8960(struct tsens_priv *priv)
  {
int i;
@@ -346,7 +296,7 @@ static const struct reg_field 
tsens_8960_regfields[MAX_REGFIELDS] = {
  };
  
  static const struct tsens_ops ops_8960 = {

-   .init   = init_8960,
+   .init   = init_common,
.calibrate  = calibrate_8960,
.get_temp   = get_temp_8960,
.enable = enable_8960,





Re: [PATCH v10 2/8] drivers: thermal: tsens: Don't hardcode sensor slope

2021-03-18 Thread Thara Gopinath




On 2/17/21 2:40 PM, Ansuel Smith wrote:

Function compute_intercept_slope hardcode the sensor slope to
SLOPE_DEFAULT. Change this and use the default value only if a slope is
not defined. This is needed for tsens VER_0 that has a hardcoded slope
table.

Signed-off-by: Ansuel Smith 


Reviewed-by: Thara Gopinath 

Warm Regards
Thara


---
  drivers/thermal/qcom/tsens.c | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index f9126909892b..842f518fdf84 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -86,7 +86,8 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
__func__, i, p1[i], p2[i]);
  
-		priv->sensor[i].slope = SLOPE_DEFAULT;

+   if (!priv->sensor[i].slope)
+   priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
/*
 * slope (m) = adc_code2 - adc_code1 (y2 - y1)/



--
Warm Regards
Thara


Re: [PATCH v10 1/8] drivers: thermal: tsens: Add VER_0 tsens version

2021-03-18 Thread Thara Gopinath

Hi Ansuel!

Apologies for the delay in the review.

This particular patch throws checkpatch check warnings. Please
run checkpatch.pl --strict and fix them. The rest of my comments are below.

On 2/17/21 2:40 PM, Ansuel Smith wrote:

VER_0 is used to describe device based on tsens version before v0.1.
These device are devices based on msm8960 for example apq8064 or
ipq806x.

Signed-off-by: Ansuel Smith 
---
  drivers/thermal/qcom/tsens.c | 175 +--
  drivers/thermal/qcom/tsens.h |   4 +-
  2 files changed, 151 insertions(+), 28 deletions(-)

diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index d8ce3a687b80..f9126909892b 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -12,6 +12,7 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
@@ -515,6 +516,15 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
dev_dbg(priv->dev, "[%u] %s: no violation:  %d\n",
hw_id, __func__, temp);
}
+
+   if (tsens_version(priv) < VER_0_1) {
+   /* Constraint: There is only 1 interrupt control 
register for all
+* 11 temperature sensor. So monitoring more than 1 
sensor based
+* on interrupts will yield inconsistent result. To 
overcome this
+* issue we will monitor only sensor 0 which is the 
master sensor.
+*/
+   break;
+   }
}
  
  	return IRQ_HANDLED;

@@ -530,6 +540,13 @@ static int tsens_set_trips(void *_sensor, int low, int 
high)
int high_val, low_val, cl_high, cl_low;
u32 hw_id = s->hw_id;
  
+	if (tsens_version(priv) < VER_0_1) {

+   /* Pre v0.1 IP had a single register for each type of interrupt
+* and thresholds
+*/
+   hw_id = 0;
+   }
+
dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n",
hw_id, __func__, low, high);
  
@@ -584,18 +601,21 @@ int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)

u32 valid;
int ret;
  
-	ret = regmap_field_read(priv->rf[valid_idx], );

-   if (ret)
-   return ret;
-   while (!valid) {
-   /* Valid bit is 0 for 6 AHB clock cycles.
-* At 19.2MHz, 1 AHB clock is ~60ns.
-* We should enter this loop very, very rarely.
-*/
-   ndelay(400);
+   /* VER_0 doesn't have VALID bit */
+   if (tsens_version(priv) >= VER_0_1) {
ret = regmap_field_read(priv->rf[valid_idx], );
if (ret)
return ret;
+   while (!valid) {
+   /* Valid bit is 0 for 6 AHB clock cycles.
+* At 19.2MHz, 1 AHB clock is ~60ns.
+* We should enter this loop very, very rarely.
+*/
+   ndelay(400);
+   ret = regmap_field_read(priv->rf[valid_idx], );
+   if (ret)
+   return ret;
+   }
}
  
  	/* Valid bit is set, OK to read the temperature */

@@ -608,15 +628,29 @@ int get_temp_common(const struct tsens_sensor *s, int 
*temp)
  {
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
-   int last_temp = 0, ret;
+   int last_temp = 0, ret, trdy;
+   unsigned long timeout;
  
-	ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], _temp);

-   if (ret)
-   return ret;
+   timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+   do {
+   if (priv->rf[TRDY]) {
+   ret = regmap_field_read(priv->rf[TRDY], );
+   if (ret)
+   return ret;
+   if (!trdy)
+   continue;
+   }


Did you test this on a v0.1 sensor and ensure that the trdy handshake
works there as well? I don't have a platform to test this. The safer
option here would be to do the handshake only for v0.
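
In other words, something along these lines (sketch; this is roughly what
the next revision of the series ended up doing):

	if (tsens_version(priv) == VER_0) {
		ret = regmap_field_read(priv->rf[TRDY], &trdy);
		if (ret)
			return ret;
		if (!trdy)
			continue;
	}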



+
+   ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], 
_temp);
+   if (ret)
+   return ret;
  
-	*temp = code_to_degc(last_temp, s) * 1000;

+   *temp = code_to_degc(last_temp, s) * 1000;
  
-	return 0;

+   return 0;
+   } while (time_before(jiffies, timeout));
+
+   return -ETIMEDOUT;
  }
  
  #ifdef CONFIG_DEBUG_FS

@@ -738,19 +772,31 @@ int __init init_common(struct tsens_priv *priv)
priv->tm_offset = 0x1000;
}
  
-	res = platform_get_resource(op, IORESOURCE_MEM, 0);

-   tm_base = devm_ioremap_resource(dev, res);
-   if (IS_ERR(tm_base)) {
-   ret = PTR_ERR(tm_base);
-   goto err_put_device;
+   

Re: [PATCH v10 7/8] drivers: thermal: tsens: Add support for ipq8064-tsens

2021-03-18 Thread Thara Gopinath




On 2/17/21 2:40 PM, Ansuel Smith wrote:

Add support for tsens present in ipq806x SoCs based on generic msm8960
tsens driver.

Signed-off-by: Ansuel Smith 


Reviewed-by: Thara Gopinath 

Warm Regards
Thara


---
  drivers/thermal/qcom/tsens.c | 3 +++
  1 file changed, 3 insertions(+)

diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 842f518fdf84..e14b90ddd0f9 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -1001,6 +1001,9 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, 
tsens_resume);
  
  static const struct of_device_id tsens_table[] = {

{
+   .compatible = "qcom,ipq8064-tsens",
+   .data = _8960,
+   }, {
.compatible = "qcom,msm8916-tsens",
.data = _8916,
}, {



Re: [PATCH v3] thermal: qcom: tsens-v0_1: Add support for MDM9607

2021-03-17 Thread Thara Gopinath




On 2/9/21 2:25 PM, Konrad Dybcio wrote:

MDM9607 TSENS IP is very similar to the one of MSM8916, with
minor adjustments to various tuning values.

Signed-off-by: Konrad Dybcio 
Acked-by: Rob Herring 

---
Changes since v2:
- Address Bjorn's comments (remove redundant variable and kfree)
  .../bindings/thermal/qcom-tsens.yaml  |  2 +
  drivers/thermal/qcom/tsens-v0_1.c | 99 ++-
  drivers/thermal/qcom/tsens.c  |  3 +
  drivers/thermal/qcom/tsens.h  |  2 +-
  4 files changed, 104 insertions(+), 2 deletions(-)

diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml 
b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index 95462e071ab4..8ad9dc139c23 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -22,6 +22,7 @@ properties:
- description: v0.1 of TSENS
  items:
- enum:
+  - qcom,mdm9607-tsens
- qcom,msm8916-tsens
- qcom,msm8939-tsens
- qcom,msm8974-tsens
@@ -94,6 +95,7 @@ allOf:
  compatible:
contains:
  enum:
+  - qcom,mdm9607-tsens


This should be split into two different patches. DT binding changes are
usually not combined with driver changes because two different
maintainers handle them. checkpatch.pl also throws a warning saying the
same.



- qcom,msm8916-tsens
- qcom,msm8974-tsens
- qcom,msm8976-tsens
diff --git a/drivers/thermal/qcom/tsens-v0_1.c 
b/drivers/thermal/qcom/tsens-v0_1.c
index 4ffa2e2c0145..a9fc92a4779b 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -190,6 +190,39 @@
  
  #define BIT_APPEND		0x3
  
+/* eeprom layout data for mdm9607 */

+#define MDM9607_BASE0_MASK 0x00ff
+#define MDM9607_BASE1_MASK 0x000ff000
+#define MDM9607_BASE0_SHIFT0
+#define MDM9607_BASE1_SHIFT12
+
+#define MDM9607_S0_P1_MASK 0x3f00
+#define MDM9607_S1_P1_MASK 0x03f0
+#define MDM9607_S2_P1_MASK 0x003f
+#define MDM9607_S3_P1_MASK 0x0003f000
+#define MDM9607_S4_P1_MASK 0x003f
+
+#define MDM9607_S0_P2_MASK 0x000fc000
+#define MDM9607_S1_P2_MASK 0xfc00
+#define MDM9607_S2_P2_MASK 0x0fc0
+#define MDM9607_S3_P2_MASK 0x00fc
+#define MDM9607_S4_P2_MASK 0x0fc0
+
+#define MDM9607_S0_P1_SHIFT8
+#define MDM9607_S1_P1_SHIFT20
+#define MDM9607_S2_P1_SHIFT0
+#define MDM9607_S3_P1_SHIFT12
+#define MDM9607_S4_P1_SHIFT0
+
+#define MDM9607_S0_P2_SHIFT14
+#define MDM9607_S1_P2_SHIFT26
+#define MDM9607_S2_P2_SHIFT6
+#define MDM9607_S3_P2_SHIFT18
+#define MDM9607_S4_P2_SHIFT6
+
+#define MDM9607_CAL_SEL_MASK   0x0070
+#define MDM9607_CAL_SEL_SHIFT  20
+
  static int calibrate_8916(struct tsens_priv *priv)
  {
int base0 = 0, base1 = 0, i;
@@ -452,7 +485,56 @@ static int calibrate_8974(struct tsens_priv *priv)
return 0;
  }
  
-/* v0.1: 8916, 8939, 8974 */

+static int calibrate_9607(struct tsens_priv *priv)
+{
+   int base, i;
+   u32 p1[5], p2[5];
+   int mode = 0;
+   u32 *qfprom_cdata;
+
+   qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+   if (IS_ERR(qfprom_cdata))
+   return PTR_ERR(qfprom_cdata);
+
+   mode = (qfprom_cdata[2] & MDM9607_CAL_SEL_MASK) >> 
MDM9607_CAL_SEL_SHIFT;
+   dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+   switch (mode) {
+   case TWO_PT_CALIB:
+   base = (qfprom_cdata[2] & MDM9607_BASE1_MASK) >> 
MDM9607_BASE1_SHIFT;
+   p2[0] = (qfprom_cdata[0] & MDM9607_S0_P2_MASK) >> 
MDM9607_S0_P2_SHIFT;
+   p2[1] = (qfprom_cdata[0] & MDM9607_S1_P2_MASK) >> 
MDM9607_S1_P2_SHIFT;
+   p2[2] = (qfprom_cdata[1] & MDM9607_S2_P2_MASK) >> 
MDM9607_S2_P2_SHIFT;
+   p2[3] = (qfprom_cdata[1] & MDM9607_S3_P2_MASK) >> 
MDM9607_S3_P2_SHIFT;
+   p2[4] = (qfprom_cdata[2] & MDM9607_S4_P2_MASK) >> 
MDM9607_S4_P2_SHIFT;
+   for (i = 0; i < priv->num_sensors; i++)
+   p2[i] = ((base + p2[i]) << 2);
+   fallthrough;
+   case ONE_PT_CALIB2:
+   base = (qfprom_cdata[0] & MDM9607_BASE0_MASK);
+   p1[0] = (qfprom_cdata[0] & MDM9607_S0_P1_MASK) >> 
MDM9607_S0_P1_SHIFT;
+   p1[1] = (qfprom_cdata[0] & MDM9607_S1_P1_MASK) >> 
MDM9607_S1_P1_SHIFT;
+   p1[2] = (qfprom_cdata[1] & MDM9607_S2_P1_MASK) >> 
MDM9607_S2_P1_SHIFT;
+   p1[3] = (qfprom_cdata[1] & MDM9607_S3_P1_MASK) >> 
MDM9607_S3_P1_SHIFT;
+   p1[4] = (qfprom_cdata[2] & MDM9607_S4_P1_MASK) >> 
MDM9607_S4_P1_SHIFT;
+   for (i = 0; i < priv->num_sensors; i++)
+   p1[i] = (((base) + p1[i]) << 2);


minor nit: extra braces around base


+   

Re: [PATCH] thermal: qcom: tsens_v1: Enable sensor 3 on MSM8976

2021-03-17 Thread Thara Gopinath




On 2/25/21 4:31 PM, Konrad Dybcio wrote:

The sensor *is* in fact used and does report temperature.


I can't find any info that says otherwise. So,

Acked-by: Thara Gopinath 

Warm Regards
Thara



Signed-off-by: Konrad Dybcio 
---
  drivers/thermal/qcom/tsens-v1.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 3c19a3800c6d..573e261ccca7 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -380,11 +380,11 @@ static const struct tsens_ops ops_8976 = {
.get_temp   = get_temp_tsens_valid,
  };
  
-/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. */

+/* Valid for both MSM8956 and MSM8976. */
  struct tsens_plat_data data_8976 = {
.num_sensors= 11,
.ops= _8976,
-   .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10},
+   .hw_ids = (unsigned int[]){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
.feat   = _v1_feat,
.fields = tsens_v1_regfields,
  };



--
Warm Regards
Thara


Re: [PATCH 2/8] dt-bindings: crypto : Add new compatible strings for qcom-qce

2021-03-17 Thread Thara Gopinath




On 3/17/21 9:20 AM, Bhupesh Sharma wrote:

Hi Rob,

Thanks for your review.

On Wed, 17 Mar 2021 at 03:58, Rob Herring  wrote:


On Wed, Mar 10, 2021 at 10:54:57AM +0530, Bhupesh Sharma wrote:

Newer qcom chips support newer versions of the qce IP, so add
new compatible strings for qcom-qce (in addition to the existing
"qcom,crypto-v5.1").

With [1], Thara tried to add the support for new compatible strings,
but we couldn't conclude on the approach to be used. Since we have
a number of new qcom arm64 SoCs available now, several of which
support the same crypto IP version, so it makes more sense to use
the IP version for the compatible string, rather than using the soc
name as the compatible string.

[1]. 
https://lore.kernel.org/linux-arm-msm/20201119155233.3974286-7-thara.gopin...@linaro.org/

Cc: Thara Gopinath 
Cc: Bjorn Andersson 
Cc: Rob Herring 
Cc: Andy Gross 
Cc: Herbert Xu 
Cc: David S. Miller 
Cc: Stephen Boyd 
Cc: Michael Turquette 
Cc: linux-...@vger.kernel.org
Cc: linux-cry...@vger.kernel.org
Cc: devicet...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: bhupesh.li...@gmail.com
Signed-off-by: Bhupesh Sharma 
---
  Documentation/devicetree/bindings/crypto/qcom-qce.txt | 6 +-
  1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt 
b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
index 07ee1b12000b..217b37dbd58a 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.txt
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
@@ -2,7 +2,11 @@ Qualcomm crypto engine driver

  Required properties:

-- compatible  : should be "qcom,crypto-v5.1"
+- compatible  : Supported versions are:
+ - "qcom,crypto-v5.1", for ipq6018
+ - "qcom,crypto-v5.4", for sdm845, sm8150


2 SoCs sharing 1 version doesn't convince me on using version numbers.
Having 4 versions for 5 SoCs further convinces me you should stick with
SoC specific compatibles as *everyone* else does (including most QCom
bindings).


Hi!

So, it is two SoCs today, but we do have a bunch of SoCs for each version
and these could be added in the future. I think I have asked this question
before as well: how about "qcom,sdm845-crypto", "qcom,crypto-v5.4" in the
DT, and only the "qcom,crypto-<version>" entries in the driver? I see this
being done by some Qcom bindings.
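
For illustration, with that scheme the driver's match table would only need
the IP-version entries, roughly like this (sketch that mirrors the shape of
the existing qce match table; entries and table name are illustrative):

	static const struct of_device_id qce_crypto_of_match[] = {
		{ .compatible = "qcom,crypto-v5.1" },
		{ .compatible = "qcom,crypto-v5.4" },
		{}
	};

while a board DT node would list both strings, e.g.
compatible = "qcom,sdm845-crypto", "qcom,crypto-v5.4";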




Fair enough. I will add SoC specific compatibles in v2, which should
be out shortly.

Regards,
Bhupesh


+ - "qcom,crypto-v5.5", for sm8250
+ - "qcom,crypto-v5.6", for sm8350
  - reg : specifies base physical address and size of the registers map
  - clocks  : phandle to clock-controller plus clock-specifier pair
  - clock-names : "iface" clocks register interface
--
2.29.2



--
Warm Regards
Thara


Re: [PATCH 4/7] crypto: qce: Add support for AEAD algorithms

2021-03-16 Thread Thara Gopinath




On 3/12/21 8:01 AM, Herbert Xu wrote:

On Thu, Feb 25, 2021 at 01:27:13PM -0500, Thara Gopinath wrote:


+static int
+qce_aead_async_req_handle(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   enum dma_data_direction dir_src, dir_dst;
+   unsigned int totallen;
+   bool diff_dst;
+   int ret;
+
+   if (IS_CCM_RFC4309(rctx->flags)) {
+   memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
+   rctx->ccm_rfc4309_iv[0] = 3;
+   memcpy(>ccm_rfc4309_iv[1], ctx->ccm4309_salt, 
QCE_CCM4309_SALT_SIZE);
+   memcpy(>ccm_rfc4309_iv[4], req->iv, 8);
+   rctx->iv = rctx->ccm_rfc4309_iv;
+   rctx->ivsize = AES_BLOCK_SIZE;
+   } else {
+   rctx->iv = req->iv;
+   rctx->ivsize = crypto_aead_ivsize(tfm);
+   }
+   if (IS_CCM_RFC4309(rctx->flags))
+   rctx->assoclen = req->assoclen - 8;
+   else
+   rctx->assoclen = req->assoclen;
+
+   totallen = rctx->cryptlen + rctx->assoclen;


This triggers a warning on totallen not being used.  Please fix.


Hmm.. this is strange. I could swear that I checked for warnings before
sending this out. But I will fix it. I will wait a couple more
days for any other comments and then spin a v2.




Thanks,



--
Warm Regards
Thara


Re: [PATCH 0/7] Add support for AEAD algorithms in Qualcomm Crypto Engine driver

2021-03-16 Thread Thara Gopinath




On 3/12/21 8:02 AM, Herbert Xu wrote:

On Thu, Mar 04, 2021 at 01:41:15PM -0500, Thara Gopinath wrote:


Yes it did. The last patch adds fallback for unsupported cases and
this will make it pass the fuzz tests.


Please include this information in the next round.


I will. Thanks!


Thanks,



--
Warm Regards
Thara


Re: [PATCH v10 0/8] Add support for ipq8064 tsens

2021-03-10 Thread Thara Gopinath




On 3/10/21 7:19 AM, Daniel Lezcano wrote:


Hi Ansuel,

On 17/02/2021 20:40, Ansuel Smith wrote:

This patchset converts msm8960 to reg_fields, uses init_common instead
of a custom function and fixes the wrong tsens get_temp function for msm8960.
The Ipq8064 SoC tsens driver is based on the 8960 tsens driver. Ipq8064 needs
to be registered as a gcc child as the tsens regs on this platform are
shared with the controller.
This is based on work and code here
https://git.linaro.org/people/amit.kucheria/kernel.git/log/?h=wrk3/tsens-8960-breakage


I don't have major concerns with the series except there is no comment
from the maintainer / reviewer of the sensor.

Given it is based on Amit's work, I can assume they are correct.

I added Thara in Cc hoping she has time to review the changes. If nobody
complains about the series, I'll merge it in the next few days.


Hi Ansuel/Daniel,

Just wanted to let you know that I have started looking into this and
will review it within the next week or two.




Thanks

   -- Daniel




--
Warm Regards
Thara


Re: [PATCH 0/7] Add support for AEAD algorithms in Qualcomm Crypto Engine driver

2021-03-04 Thread Thara Gopinath
On Thu, 4 Mar 2021 at 00:30, Herbert Xu  wrote:
>
> On Thu, Feb 25, 2021 at 01:27:09PM -0500, Thara Gopinath wrote:
> > Enable support for AEAD algorithms in Qualcomm CE driver.  The first three
> > patches in this series are cleanups and add a few missing pieces required
> > to add support for AEAD algorithms.  Patch 4 introduces supported AEAD
> > transformations on Qualcomm CE.  Patches 5 and 6 implements the h/w
> > infrastructure needed to enable and run the AEAD transformations on
> > Qualcomm CE.  Patch 7 adds support to queue fallback algorithms in case of
> > unsupported special inputs.
> >
> > This series is dependent on https://lkml.org/lkml/2021/2/11/1052.
>
> Did this patch series pass the fuzz tests?

Hi Herbert,

Yes it did. The last patch adds fallback for unsupported cases and
this will make it pass the fuzz tests.

>
> Thanks,
> --
> Email: Herbert Xu 
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



-- 
Warm Regards
Thara


[PATCH 7/7] crypto: qce: aead: Schedule fallback algorithm

2021-02-25 Thread Thara Gopinath
Qualcomm crypto engine does not handle the following scenarios and
will issue an abort. In such cases, pass on the transformation to
a fallback algorithm.

- DES3 algorithms with all three keys same.
- AES192 algorithms.
- 0 length messages.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/aead.c | 58 ---
 drivers/crypto/qce/aead.h |  3 ++
 2 files changed, 51 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index b594c4bb2640..4c2d024e5296 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -492,7 +492,20 @@ static int qce_aead_crypt(struct aead_request *req, int 
encrypt)
/* CE does not handle 0 length messages */
if (!rctx->cryptlen) {
if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
-   return -EINVAL;
+   ctx->need_fallback = true;
+   }
+
+   /* If fallback is needed, schedule and exit */
+   if (ctx->need_fallback) {
+   aead_request_set_tfm(>fallback_req, ctx->fallback);
+   aead_request_set_callback(>fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+   aead_request_set_crypt(>fallback_req, req->src,
+  req->dst, req->cryptlen, req->iv);
+   aead_request_set_ad(>fallback_req, req->assoclen);
+
+   return encrypt ? crypto_aead_encrypt(>fallback_req) :
+crypto_aead_decrypt(>fallback_req);
}
 
/*
@@ -533,7 +546,7 @@ static int qce_aead_ccm_setkey(struct crypto_aead *tfm, 
const u8 *key,
memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
}
 
-   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
+   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != 
AES_KEYSIZE_192)
return -EINVAL;
 
ctx->enc_keylen = keylen;
@@ -542,7 +555,12 @@ static int qce_aead_ccm_setkey(struct crypto_aead *tfm, 
const u8 *key,
memcpy(ctx->enc_key, key, keylen);
memcpy(ctx->auth_key, key, keylen);
 
-   return 0;
+   if (keylen == AES_KEYSIZE_192)
+   ctx->need_fallback = true;
+
+   return IS_CCM_RFC4309(flags) ?
+   crypto_aead_setkey(ctx->fallback, key, keylen + 
QCE_CCM4309_SALT_SIZE) :
+   crypto_aead_setkey(ctx->fallback, key, keylen);
 }
 
 static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned 
int keylen)
@@ -573,20 +591,21 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const 
u8 *key, unsigned int
 * The crypto engine does not support any two keys
 * being the same for triple des algorithms. The
 * verify_skcipher_des3_key does not check for all the
-* below conditions. Return -EINVAL in case any two keys
-* are the same. Revisit to see if a fallback cipher
-* is needed to handle this condition.
+* below conditions. Schedule fallback in this case.
 */
memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
!((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
!((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
-   return -EINVAL;
+   ctx->need_fallback = true;
} else if (IS_AES(flags)) {
/* No random key sizes */
if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
+   authenc_keys.enckeylen != AES_KEYSIZE_192 &&
authenc_keys.enckeylen != AES_KEYSIZE_256)
return -EINVAL;
+   if (authenc_keys.enckeylen == AES_KEYSIZE_192)
+   ctx->need_fallback = true;
}
 
ctx->enc_keylen = authenc_keys.enckeylen;
@@ -597,7 +616,7 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const 
u8 *key, unsigned int
memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
 
-   return 0;
+   return crypto_aead_setkey(ctx->fallback, key, keylen);
 }
 
 static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -612,15 +631,32 @@ static int qce_aead_setauthsize(struct crypto_aead *tfm, 
unsigned int authsize)
return -EINVAL;
}
ctx->authsize = authsize;
-   return 0;
+
+   return crypto_aead_setauthsize(ctx->fallback, authsize);
 }
 
 static int qce_aead_init(struct crypto_aead *tfm)
 {
+ 

[PATCH 6/7] crypto: qce: common: Add support for AEAD algorithms

2021-02-25 Thread Thara Gopinath
Add register programming sequence for enabling AEAD
algorithms on the Qualcomm crypto engine.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 155 +++-
 1 file changed, 153 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 05a71c5ecf61..54d209cb0525 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,16 @@
 #include "core.h"
 #include "regs-v5.h"
 #include "sha.h"
+#include "aead.h"
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+   SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
 
 static inline u32 qce_read(struct qce_device *qce, u32 offset)
 {
@@ -96,7 +106,7 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
 }
 
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
 static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
 {
u32 cfg = 0;
@@ -139,7 +149,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, 
u32 auth_size)
 
return cfg;
 }
+#endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +237,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
 }
 #endif
 
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || 
defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
 static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
 {
u32 cfg = 0;
@@ -271,7 +283,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 
aes_key_size)
 
return cfg;
 }
+#endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
 {
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +400,139 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
 }
 #endif
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+   __be32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+   __be32 authiv[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+   __be32 authnonce[QCE_MAX_NONCE / sizeof(__be32)] = {0};
+   unsigned int enc_keylen = ctx->enc_keylen;
+   unsigned int auth_keylen = ctx->auth_keylen;
+   unsigned int enc_ivsize = rctx->ivsize;
+   unsigned int auth_ivsize;
+   unsigned int enckey_words, enciv_words;
+   unsigned int authkey_words, authiv_words, authnonce_words;
+   unsigned long flags = rctx->flags;
+   u32 encr_cfg = 0, auth_cfg = 0, config, totallen;
+   u32 *iv_last_word;
+
+   qce_setup_config(qce);
+
+   /* Write encryption key */
+   qce_cpu_to_be32p_array(enckey, ctx->enc_key, enc_keylen);
+   enckey_words = enc_keylen / sizeof(u32);
+   qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+   /* Write encryption iv */
+   qce_cpu_to_be32p_array(enciv, rctx->iv, enc_ivsize);
+   enciv_words = enc_ivsize / sizeof(u32);
+   qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+
+   if (IS_CCM(rctx->flags)) {
+   iv_last_word = (u32 *)[enciv_words - 1];
+// qce_write(qce, REG_CNTR3_IV3, enciv[enciv_words - 1] + 1);
+   qce_write(qce, REG_CNTR3_IV3, (*iv_last_word) + 1);
+   qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, 
enciv_words);
+   qce_write(qce, REG_CNTR_MASK, ~0);
+   qce_write(qce, REG_CNTR_MASK0, ~0);
+   qce_write(qce, REG_CNTR_MASK1, ~0);
+   qce_write(qce, REG_CNTR_MASK2, ~0);
+   }
+
+   /* Clear authentication IV and KEY registers of previous values */
+   qce_clear_array(qce, REG_AUTH_IV0, 16);
+   qce_clear_array(qce, REG_AUTH_KEY0, 16);
+
+   /* Clear byte count */
+   qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+   /* Write authentication key */
+   qce_cpu_to_be32p_array(authkey, ctx->auth_key, auth_keylen);
+   authkey_words = DIV_ROUND_UP(auth_keylen, si

[PATCH 5/7] crypto: qce: common: Clean up qce_auth_cfg

2021-02-25 Thread Thara Gopinath
Remove various redundant checks in qce_auth_cfg. Also allow qce_auth_cfg
to take auth_size as a parameter, which is a required setting for ccm(aes)
algorithms.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 21 +
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 2485aa371d83..05a71c5ecf61 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -97,11 +97,11 @@ static inline void qce_crypto_go(struct qce_device *qce, 
bool result_dump)
 }
 
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
-static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
 {
u32 cfg = 0;
 
-   if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+   if (IS_CCM(flags) || IS_CMAC(flags))
cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
else
cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
@@ -119,15 +119,16 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+   else if (IS_CCM(flags))
+   cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;
 
if (IS_SHA1(flags) || IS_SHA256(flags))
cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
-   else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
-IS_CBC(flags) || IS_CTR(flags))
+   else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
-   else if (IS_AES(flags) && IS_CCM(flags))
+   else if (IS_CCM(flags))
cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
-   else if (IS_AES(flags) && IS_CMAC(flags))
+   else if (IS_CMAC(flags))
cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
 
if (IS_SHA(flags) || IS_SHA_HMAC(flags))
@@ -136,10 +137,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
if (IS_CCM(flags))
cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
 
-   if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
-   IS_CMAC(flags))
-   cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
-
return cfg;
 }
 
@@ -171,7 +168,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
qce_clear_array(qce, REG_AUTH_KEY0, 16);
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
 
-   auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+   auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, 
digestsize);
}
 
if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
@@ -199,7 +196,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
qce_write_array(qce, REG_AUTH_BYTECNT0,
(u32 *)rctx->byte_count, 2);
 
-   auth_cfg = qce_auth_cfg(rctx->flags, 0);
+   auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);
 
if (rctx->last_blk)
auth_cfg |= BIT(AUTH_LAST_SHIFT);
-- 
2.25.1



[PATCH 4/7] crypto: qce: Add support for AEAD algorithms

2021-02-25 Thread Thara Gopinath
Introduce support to enable the following algorithms in the Qualcomm Crypto Engine.

- authenc(hmac(sha1),cbc(des))
- authenc(hmac(sha1),cbc(des3_ede))
- authenc(hmac(sha256),cbc(des))
- authenc(hmac(sha256),cbc(des3_ede))
- authenc(hmac(sha256),cbc(aes))
- ccm(aes)
- rfc4309(ccm(aes))

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/Kconfig  |  15 +
 drivers/crypto/qce/Makefile |   1 +
 drivers/crypto/qce/aead.c   | 779 
 drivers/crypto/qce/aead.h   |  53 +++
 drivers/crypto/qce/common.h |   2 +
 drivers/crypto/qce/core.c   |   4 +
 6 files changed, 854 insertions(+)
 create mode 100644 drivers/crypto/qce/aead.c
 create mode 100644 drivers/crypto/qce/aead.h

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e535f28a8028..8caf296acda4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -645,6 +645,12 @@ config CRYPTO_DEV_QCE_SHA
select CRYPTO_SHA1
select CRYPTO_SHA256
 
+config CRYPTO_DEV_QCE_AEAD
+   bool
+   depends on CRYPTO_DEV_QCE
+   select CRYPTO_AUTHENC
+   select CRYPTO_LIB_DES
+
 choice
prompt "Algorithms enabled for QCE acceleration"
default CRYPTO_DEV_QCE_ENABLE_ALL
@@ -665,6 +671,7 @@ choice
bool "All supported algorithms"
select CRYPTO_DEV_QCE_SKCIPHER
select CRYPTO_DEV_QCE_SHA
+   select CRYPTO_DEV_QCE_AEAD
help
  Enable all supported algorithms:
- AES (CBC, CTR, ECB, XTS)
@@ -690,6 +697,14 @@ choice
- SHA1, HMAC-SHA1
- SHA256, HMAC-SHA256
 
+   config CRYPTO_DEV_QCE_ENABLE_AEAD
+   bool "AEAD algorithms only"
+   select CRYPTO_DEV_QCE_AEAD
+   help
+ Enable AEAD algorithms only:
+   - authenc()
+   - ccm(aes)
+   - rfc4309(ccm(aes))
 endchoice
 
 config CRYPTO_DEV_QCE_SW_MAX_LEN
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 14ade8a7d664..2cf8984e1b85 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -6,3 +6,4 @@ qcrypto-objs := core.o \
 
 qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
 qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_AEAD) += aead.o
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
new file mode 100644
index ..b594c4bb2640
--- /dev/null
+++ b/drivers/crypto/qce/aead.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "aead.h"
+
+#define CCM_NONCE_ADATA_SHIFT  6
+#define CCM_NONCE_AUTHSIZE_SHIFT   3
+#define MAX_CCM_ADATA_HEADER_LEN6
+
+static LIST_HEAD(aead_algs);
+
+static void qce_aead_done(void *data)
+{
+   struct crypto_async_request *async_req = data;
+   struct aead_request *req = aead_request_cast(async_req);
+   struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+   struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+   struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+   struct qce_device *qce = tmpl->qce;
+   struct qce_result_dump *result_buf = qce->dma.result_buf;
+   enum dma_data_direction dir_src, dir_dst;
+   bool diff_dst;
+   int error;
+   u32 status;
+   unsigned int totallen;
+   unsigned char tag[SHA256_DIGEST_SIZE] = {0};
+   int ret = 0;
+
+   diff_dst = (req->src != req->dst) ? true : false;
+   dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+   dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+   error = qce_dma_terminate_all(>dma);
+   if (error)
+   dev_dbg(qce->dev, "aead dma termination error (%d)\n",
+   error);
+   if (diff_dst)
+   dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+
+   dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+   if (IS_CCM(rctx->flags)) {
+   if (req->assoclen) {
+   sg_free_table(>src_tbl);
+   if (diff_dst)
+   sg_free_table(>dst_tbl);
+   } else {
+   if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
+   sg_free_table(>dst_tbl);
+   }
+   } else {
+   sg_free_table(>dst_tbl);
+   }
+
+   error = qce_check_status(qce, );
+   if (error < 0 && (error != -EBADMSG))
+   dev_err(qce->dev, "aead operation error (%x)\n", stat

[PATCH 2/7] crypto: qce: common: Make result dump optional

2021-02-25 Thread Thara Gopinath
The Qualcomm crypto engine allows the IV registers and the status register
to be concatenated to the output. This option is enabled by setting the
RESULTS_DUMP field in the GOPROC register. This is useful for most
algorithms, either to retrieve the status of the operation or, in the case
of authentication algorithms, to retrieve the mac. But for ccm
algorithms, the mac is part of the output stream and not retrieved
from the IV registers, thus needing a separate buffer to retrieve it.
Make enabling the RESULTS_DUMP field optional so that algorithms can choose
whether or not to enable the option.
Note that in this patch, the enabled algorithms always choose
RESULTS_DUMP to be enabled. But later, with the introduction of ccm
algorithms, this changes.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7c3cb483749e..2485aa371d83 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -88,9 +88,12 @@ static void qce_setup_config(struct qce_device *qce)
qce_write(qce, REG_CONFIG, config);
 }
 
-static inline void qce_crypto_go(struct qce_device *qce)
+static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
 {
-   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+   if (result_dump)
+   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | 
BIT(RESULTS_DUMP_SHIFT));
+   else
+   qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
 }
 
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
@@ -219,7 +222,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request 
*async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
 
-   qce_crypto_go(qce);
+   qce_crypto_go(qce, true);
 
return 0;
 }
@@ -380,7 +383,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
 
-   qce_crypto_go(qce);
+   qce_crypto_go(qce, true);
 
return 0;
 }
-- 
2.25.1



[PATCH 1/7] crypto: qce: common: Add MAC failed error checking

2021-02-25 Thread Thara Gopinath
MAC_FAILED gets set in the status register if authentication fails
for ccm algorithms (during decryption). Add support to catch and flag
this error.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dceb9579d87a..7c3cb483749e 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -403,7 +403,8 @@ int qce_start(struct crypto_async_request *async_req, u32 
type)
 }
 
 #define STATUS_ERRORS  \
-   (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+   (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) |   \
+BIT(HSD_ERR_SHIFT) | BIT(MAC_FAILED_SHIFT))
 
 int qce_check_status(struct qce_device *qce, u32 *status)
 {
@@ -417,8 +418,12 @@ int qce_check_status(struct qce_device *qce, u32 *status)
 * use result_status from result dump the result_status needs to be byte
 * swapped, since we set the device to little endian.
 */
-   if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
-   ret = -ENXIO;
+   if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) {
+   if (*status & BIT(MAC_FAILED_SHIFT))
+   ret = -EBADMSG;
+   else
+   ret = -ENXIO;
+   }
 
return ret;
 }
-- 
2.25.1



[PATCH 3/7] crypto: qce: Add mode for rfc4309

2021-02-25 Thread Thara Gopinath
rfc4309 is the specification that uses aes ccm algorithms with IPsec
security packets. Add a submode to identify the rfc4309 ccm(aes) algorithm
in the crypto driver.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.h | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 3bc244bcca2d..3ffe719b79e4 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -51,9 +51,11 @@
 #define QCE_MODE_CCM   BIT(12)
 #define QCE_MODE_MASK  GENMASK(12, 8)
 
+#define QCE_MODE_CCM_RFC4309   BIT(13)
+
 /* cipher encryption/decryption operations */
-#define QCE_ENCRYPTBIT(13)
-#define QCE_DECRYPTBIT(14)
+#define QCE_ENCRYPTBIT(14)
+#define QCE_DECRYPTBIT(15)
 
 #define IS_DES(flags)  (flags & QCE_ALG_DES)
 #define IS_3DES(flags) (flags & QCE_ALG_3DES)
@@ -73,6 +75,7 @@
 #define IS_CTR(mode)   (mode & QCE_MODE_CTR)
 #define IS_XTS(mode)   (mode & QCE_MODE_XTS)
 #define IS_CCM(mode)   (mode & QCE_MODE_CCM)
+#define IS_CCM_RFC4309(mode)   ((mode) & QCE_MODE_CCM_RFC4309)
 
 #define IS_ENCRYPT(dir)(dir & QCE_ENCRYPT)
 #define IS_DECRYPT(dir)(dir & QCE_DECRYPT)
-- 
2.25.1



[PATCH 0/7] Add support for AEAD algorithms in Qualcomm Crypto Engine driver

2021-02-25 Thread Thara Gopinath
Enable support for AEAD algorithms in Qualcomm CE driver.  The first three
patches in this series are cleanups and add a few missing pieces required
to add support for AEAD algorithms.  Patch 4 introduces supported AEAD
transformations on Qualcomm CE.  Patches 5 and 6 implements the h/w
infrastructure needed to enable and run the AEAD transformations on
Qualcomm CE.  Patch 7 adds support to queue fallback algorithms in case of
unsupported special inputs.

This series is dependent on https://lkml.org/lkml/2021/2/11/1052.

Thara Gopinath (7):
  crypto: qce: common: Add MAC failed error checking
  crypto: qce: common: Make result dump optional
  crypto: qce: Add mode for rfc4309
  crypto: qce: Add support for AEAD algorithms
  crypto: qce: common: Clean up qce_auth_cfg
  crypto: qce: common: Add support for AEAD algorithms
  crypto: qce: aead: Schedule fallback algorithm

 drivers/crypto/Kconfig  |  15 +
 drivers/crypto/qce/Makefile |   1 +
 drivers/crypto/qce/aead.c   | 817 
 drivers/crypto/qce/aead.h   |  56 +++
 drivers/crypto/qce/common.c | 198 -
 drivers/crypto/qce/common.h |   9 +-
 drivers/crypto/qce/core.c   |   4 +
 7 files changed, 1077 insertions(+), 23 deletions(-)
 create mode 100644 drivers/crypto/qce/aead.c
 create mode 100644 drivers/crypto/qce/aead.h

-- 
2.25.1



Re: [PATCH v7 00/11] Regression fixes/clean ups in the Qualcomm crypto engine driver

2021-02-24 Thread Thara Gopinath




On 2/11/21 3:01 PM, Thara Gopinath wrote:

This patch series is a result of running kernel crypto fuzz tests (by
enabling CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) on the transformations
currently supported via the Qualcomm crypto engine on sdm845.  The first
nine patches are fixes for various regressions found during testing. The
last two patches are minor cleanups of an unused variable and unused parameters.


Hi Herbert,

This version has all the comments from you and the rest of the community
addressed. Do you think you can merge this?




v6->v7:
- Fixed sparse warning in patch 4 as pointed out by Herbert Xu.
  This means the checking if any two keys are same for triple
  des algorithms has been reverted back to using conditional OR
  instead of using bitwise OR.
- Rebased to 5.11-rc7.

v5->v6:
- Return 0 for zero length messages instead of -EOPNOTSUPP in the
  cipher algorithms as pointed out by Eric Biggers.
- Remove the wrong TODO in patch 6 which implied that AES CBC can
  do partial block sizes when it is actually CTS mode that can, as
  pointed out by Eric Biggers.

v4->v5:
- Fixed build warning/error in patch for wrong assignment of const
  pointer as reported by kernel test robot .
- Rebased to 5.11-rc6.
v3->v4:
- Fixed the bug where only two bytes of byte_count were getting
  saved and restored instead of all eight bytes. Thanks Bjorn for
  catching this.
- Split patch 3 "Fix regressions found during fuzz testing" into
  6 patches as requested by Bjorn.
- Dropped crypto from all subject headers.
- Rebased to 5.11-rc5
v2->v3:
 - Made the comparison between keys to check if any two keys are
   same for triple des algorithms constant-time as per
   Nym Seddon's suggestion.
 - Rebased to 5.11-rc4.
v1->v2:
 - Introduced custom struct qce_sha_saved_state to store and restore
   partial sha transformation.
 - Rebased to 5.11-rc3.

Thara Gopinath (11):
   crypto: qce: sha: Restore/save ahash state with custom struct in
 export/import
   crypto: qce: sha: Hold back a block of data to be transferred as part
 of final
   crypto: qce: skcipher: Return unsupported if key1 and key 2 are same
 for AES XTS algorithm
   crypto: qce: skcipher: Return unsupported if any three keys are same
 for DES3 algorithms
   crypto: qce: skcipher: Return error for zero length messages
   crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC
 algorithms)
   crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)
   crypto: qce: skcipher: Improve the conditions for requesting AES
 fallback cipher
   crypto: qce: common: Set data unit size to message length for AES XTS
 transformation
   crypto: qce: Remover src_tbl from qce_cipher_reqctx
   crypto: qce: Remove totallen and offset in qce_start

  drivers/crypto/qce/cipher.h   |   1 -
  drivers/crypto/qce/common.c   |  25 +++---
  drivers/crypto/qce/common.h   |   3 +-
  drivers/crypto/qce/sha.c  | 143 +-
  drivers/crypto/qce/skcipher.c |  69 +---
  5 files changed, 126 insertions(+), 115 deletions(-)



--
Warm Regards
Thara


Re: [PATCH] cpufreq: exclude boost frequencies from valid count if not enabled

2021-02-18 Thread Thara Gopinath




On 2/18/21 3:48 AM, Viresh Kumar wrote:

On 17-02-21, 10:32, Thara Gopinath wrote:

First of all, I am still unable to find this setting in the sysfs space.


The driver needs to call cpufreq_enable_boost_support() for that.


Ok, that makes sense.
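
Something like the below in the cpufreq driver's ->init() is what would
create that knob, if I understand correctly (a sketch only; the function
name is made up):

#include <linux/cpufreq.h>

/* Sketch: how a cpufreq driver opts in when boost OPPs are present */
static int example_enable_boost(struct cpufreq_policy *policy)
{
        if (!policy_has_boost_freq(policy))
                return 0;

        /* creates the global sysfs "boost" attribute */
        return cpufreq_enable_boost_support();
}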




Irrespective the ideal behavior here will be to change the cpufreq cooling
dev max state when this happens.


Hmm.. recreating it every time boost frequency is enabled is like
inviting trouble and it will be tricky. Maybe it can be done, I don't
know.:)


Scheduling a notifier for max frequency changes from the qos framework
should do the job, right?
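
Something along these lines is what I have in mind (a sketch only; the
callback and wrapper names are made up):

#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int cdev_max_freq_notify(struct notifier_block *nb,
                                unsigned long max_freq, void *data)
{
        /* sketch: re-evaluate the cooling device max_state here */
        return NOTIFY_OK;
}

static struct notifier_block cdev_max_freq_nb = {
        .notifier_call = cdev_max_freq_notify,
};

static int cdev_watch_max_freq(struct cpufreq_policy *policy)
{
        /* fires whenever the effective FREQ_QOS_MAX constraint changes */
        return freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
                                     &cdev_max_freq_nb);
}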






--
Warm Regards
Thara


Re: [PATCH] thermal: cpufreq_cooling: freq_qos_update_request() returns < 0 on error

2021-02-17 Thread Thara Gopinath




On 2/17/21 12:48 AM, Viresh Kumar wrote:

freq_qos_update_request() returns 1 if the effective constraint value
has changed, 0 if the effective constraint value has not changed, or a
negative error code on failures.

The frequency constraints for CPUs can be set by different parts of the
kernel. If the maximum frequency constraint set by other parts of the
kernel is set at a lower value than the one corresponding to cooling
state 0, then we will never be able to cool down the system as
freq_qos_update_request() will keep on returning 0 and we will skip
updating cpufreq_state and thermal pressure.

Fix that by doing the updates even in the case where
freq_qos_update_request() returns 0, as we have effectively set the
constraint to a new value even if the consolidated value of the
actual constraint is unchanged because of external factors.

Cc: v5.7+  # v5.7+
Reported-by: Thara Gopinath 
Fixes: f12e4f66ab6a ("thermal/cpu-cooling: Update thermal pressure in case of a 
maximum frequency capping")
Signed-off-by: Viresh Kumar 
---
Hi Guys,

This needs to go in 5.12-rc.

Thara, please give this a try and give your tested-by :).


It fixes the thermal runaway issue on sdm845 that I had reported. So,

Tested-by: Thara Gopinath



  drivers/thermal/cpufreq_cooling.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/thermal/cpufreq_cooling.c 
b/drivers/thermal/cpufreq_cooling.c
index f5af2571f9b7..10af3341e5ea 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -485,7 +485,7 @@ static int cpufreq_set_cur_state(struct 
thermal_cooling_device *cdev,
frequency = get_state_freq(cpufreq_cdev, state);
  
  	ret = freq_qos_update_request(_cdev->qos_req, frequency);

-   if (ret > 0) {
+   if (ret >= 0) {
cpufreq_cdev->cpufreq_state = state;
cpus = cpufreq_cdev->policy->cpus;
max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));



--
Warm Regards
Thara


Re: [PATCH] cpufreq: exclude boost frequencies from valid count if not enabled

2021-02-17 Thread Thara Gopinath




On 2/17/21 12:50 AM, Viresh Kumar wrote:

Hi Thara,

On 16-02-21, 19:00, Thara Gopinath wrote:

This is a fix for a regression observed on db845 platforms with 5.7-rc11
kernel.  On these platforms running stress tests with 5.11-rc7 kernel
causes big cpus to overheat and ultimately shutdown the system due to
hitting critical temperature (thermal throttling does not happen and
cur_state of cpufreq cooling device for big cpus remain stuck at 0 or max
frequency).

This platform has boost opp defined for big cpus but boost mode itself is
disabled in the cpufreq driver. Hence the initial max frequency request
from cpufreq cooling device(cur_state) for big cpus is for boost
frequency(2803200) where as initial max frequency request from cpufreq
driver itself is for the highest non boost frequency (2649600).


Okay.


qos
framework collates these two requests and puts the max frequency of big
cpus to 2649600 which the thermal framework is unaware of.


It doesn't need to be aware of that. It sets its max frequency and other
frameworks can put their own requests and the lowest one wins. In this case the
other constraint came from cpufreq-core, which is fine.


Yes, the qos behavior is correct here.




Now during an
over heat event, with step-wise policy governor, thermal framework tries to
throttle the cpu and places a restriction on max frequency of the cpu to
cur_state - 1


Actually it is cur_state + 1 as the values are inverted here; cooling state 0
refers to the highest frequency :)


yes. it does indeed!




which in this case 2649600. qos framework in turn tells the
cpufreq cooling device that max frequency of the cpu is already at 2649600
and the cooling device driver returns doing nothing(cur_state of the
cooling device remains unchanged).


And that's where the bug lies, I have sent proper fix for that now.


Like I mention below, there are multiple possible fixes for this issue!
More on the mismatch of frequencies below.



Thus thermal remains stuck in a loop and
never manages to actually throttle the cpu frequency. This ultimately leads
to system shutdown in case of a thermal overheat event on big cpus.
  

There are multiple possible fixes for this issue. Fundamentally,it is wrong
for cpufreq driver and cpufreq cooling device driver to show different
maximum possible state/frequency for a cpu.


Not actually, cpufreq core changes the max supported frequency at runtime based
on the availability of boost frequencies.


First of all, I am still unable to find this setting in the sysfs space.
Irrespective of that, the ideal behavior here will be to change the cpufreq
cooling dev max state when this happens. I say this for two reasons:
1. The cooling device max state will reflect the correct highest
frequency as reported by the cpufreq core. These are interfaces exposed to
user space and they should not be showing two different things.
2. More importantly, thermal will not waste valuable cycles attempting
to throttle down from a non-existent high frequency. In the case of
sdm845 we have only one boost opp in the opp table, and hence the first
time thermal tries to throttle via the cpufreq cooling device (with the
step-wise policy governor), it will return saying that the state is
already achieved and then retry because overheating has not
stopped. But say a platform has 5 such opps in the table and boost
mode is not enabled. The cpufreq cooling device will then have to attempt
this 5 times before any actual cooling action happens.




cpufreq_table_count_valid_entries() is used at different places and it is
implemented correctly.


It is used in one other place, which is for the statistics count. Boost
statistics need not be considered if boost mode is not enabled. And like
I mentioned before, as in the case of the cpufreq cooling device, the
correct behavior would be to reflect this as and when boost is enabled.
Then again, for statistics purposes it is not much of an issue if the
entry itself is present with the count showing 0 when boost modes are not
enabled. In this case, we should either have another api or have the
cpufreq cooling device not use cpufreq_table_count_valid_entries to get
the max state.
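
To be concrete, by "another api" I mean a helper roughly like the below
(hypothetical name, shown only to illustrate the idea; my patch instead
adds the boost check inside cpufreq_table_count_valid_entries itself):

#include <linux/cpufreq.h>

/* Hypothetical helper: count only the entries usable while boost is off */
static inline unsigned int
cpufreq_table_count_nonboost_entries(const struct cpufreq_policy *policy)
{
        const struct cpufreq_frequency_table *pos;
        unsigned int count = 0;

        if (unlikely(!policy->freq_table))
                return 0;

        cpufreq_for_each_valid_entry(pos, policy->freq_table) {
                if (!cpufreq_boost_enabled() &&
                    (pos->flags & CPUFREQ_BOOST_FREQ))
                        continue;
                count++;
        }

        return count;
}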






--
Warm Regards
Thara


[PATCH] cpufreq: exclude boost frequencies from valid count if not enabled

2021-02-16 Thread Thara Gopinath
This is a fix for a regression observed on db845 platforms with 5.7-rc11
kernel.  On these platforms running stress tests with 5.11-rc7 kernel
causes big cpus to overheat and ultimately shutdown the system due to
hitting critical temperature (thermal throttling does not happen and
cur_state of cpufreq cooling device for big cpus remain stuck at 0 or max
frequency).

This platform has a boost opp defined for the big cpus but boost mode itself
is disabled in the cpufreq driver. Hence the initial max frequency request
from the cpufreq cooling device (cur_state) for the big cpus is for the boost
frequency (2803200), whereas the initial max frequency request from the
cpufreq driver itself is for the highest non-boost frequency (2649600). The
qos framework collates these two requests and puts the max frequency of the
big cpus at 2649600, which the thermal framework is unaware of. Now during an
overheat event, with the step-wise policy governor, the thermal framework
tries to throttle the cpu and places a restriction on the max frequency of
the cpu of cur_state - 1, which in this case is 2649600. The qos framework in
turn tells the cpufreq cooling device that the max frequency of the cpu is
already at 2649600, and the cooling device driver returns doing nothing
(cur_state of the cooling device remains unchanged). Thus thermal remains
stuck in a loop and never manages to actually throttle the cpu frequency.
This ultimately leads to a system shutdown in case of a thermal overheat
event on the big cpus.

There are multiple possible fixes for this issue. Fundamentally, it is wrong
for the cpufreq driver and the cpufreq cooling device driver to show
different maximum possible states/frequencies for a cpu. Hence fix this
issue by ensuring that the max state of the cpufreq cooling device is in
sync with the maximum frequency of the cpu in the cpufreq driver.
cpufreq_table_count_valid_entries is used by cpufreq_cooling_device during
initialization to retrieve the max level/max frequency of a cpu. Add a
check in this api to ignore boost frequencies if boost mode is not enabled,
thus keeping the max state of the cpufreq cooling device in sync with the
maximum frequency of the cpu in the cpufreq driver.
cpufreq_frequency_table_cpuinfo, which calculates the maximum frequency of
a cpu for the cpufreq driver, already has such a check in place.

Signed-off-by: Thara Gopinath 
---
 include/linux/cpufreq.h | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9c8b7437b6cd..fe52892e0812 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1006,8 +1006,11 @@ static inline int 
cpufreq_table_count_valid_entries(const struct cpufreq_policy
if (unlikely(!policy->freq_table))
return 0;
 
-   cpufreq_for_each_valid_entry(pos, policy->freq_table)
+   cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+   if (!cpufreq_boost_enabled() && (pos->flags & 
CPUFREQ_BOOST_FREQ))
+   continue;
count++;
+   }
 
return count;
 }
-- 
2.25.1



[PATCH v7 11/11] crypto: qce: Remove totallen and offset in qce_start

2021-02-11 Thread Thara Gopinath
totallen is used to get the size of the data to be transformed.
This is also available via nbytes or cryptlen in the qce_sha_reqctx
and qce_cipher_ctx. Similarly, offset conveys nothing for the supported
encryption and authentication transformations and is always 0.
Remove these two redundant parameters in qce_start.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c   | 17 +++--
 drivers/crypto/qce/common.h   |  3 +--
 drivers/crypto/qce/sha.c  |  2 +-
 drivers/crypto/qce/skcipher.c |  2 +-
 4 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index f7bc701a4aa2..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
 }
 
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
-   u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -306,8 +305,7 @@ static void qce_xtskey(struct qce_device *qce, const u8 
*enckey,
qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
 {
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
 
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
-   qce_write(qce, REG_ENCR_SEG_START, offset & 0x);
+   qce_write(qce, REG_ENCR_SEG_START, 0);
 
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
 
-   qce_write(qce, REG_SEG_SIZE, totallen);
+   qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
 
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
 }
 #endif
 
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
 {
switch (type) {
 #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
-   return qce_setup_regs_skcipher(async_req, totallen, offset);
+   return qce_setup_regs_skcipher(async_req);
 #endif
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
-   return qce_setup_regs_ahash(async_req, totallen, offset);
+   return qce_setup_regs_ahash(async_req);
 #endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
 void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
 int qce_check_status(struct qce_device *qce, u32 *status);
 void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 
*step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
 
 #endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 2813c9a27a6e..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -113,7 +113,7 @@ static int qce_ahash_async_req_handle(struct 
crypto_async_request *async_req)
 
qce_dma_issue_pending(>dma);
 
-   ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+   ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
 
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 2e6ab1d33a31..c0a0d8c4fce1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -144,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request 
*async_req)
 
qce_dma_issue_pending(>dma);
 
-   ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+   ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
 
-- 
2.25.1



[PATCH v7 10/11] crypto: qce: Remover src_tbl from qce_cipher_reqctx

2021-02-11 Thread Thara Gopinath
src_tbl is unused and hence remove it from struct qce_cipher_reqctx.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/cipher.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
-   struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req;   // keep at the end
-- 
2.25.1



[PATCH v7 06/11] crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC algorithms)

2021-02-11 Thread Thara Gopinath
ECB/CBC encryption/decryption requires the data to be blocksize aligned.
Crypto engine hangs on non-block sized operations for these algorithms.
Return -EINVAL if the data size is not blocksize aligned for these
algorithms.

Signed-off-by: Thara Gopinath 
---

v5->v6:
- Remove the wrong TODO which implied that AES CBC can do partial
  block sizes when it is actually CTS mode that can as pointed
  out by Eric Biggers.

 drivers/crypto/qce/skcipher.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 6b3dc3a9797c..c2f0469ffb22 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -254,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, 
int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+   unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
 
@@ -265,6 +266,14 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
if (!req->cryptlen)
return 0;
 
+   /*
+* ECB and CBC algorithms require message lengths to be
+* multiples of block size.
+*/
+   if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+   if (!IS_ALIGNED(req->cryptlen, blocksize))
+   return -EINVAL;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1



[PATCH v7 09/11] crypto: qce: common: Set data unit size to message length for AES XTS transformation

2021-02-11 Thread Thara Gopinath
Set the register REG_ENCR_XTS_DU_SIZE to cryptlen for AES XTS
transformation. Anything else causes the engine to return
wrong results.

Acked-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..f7bc701a4aa2 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -295,15 +295,15 @@ static void qce_xtskey(struct qce_device *qce, const u8 
*enckey,
 {
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-   unsigned int xtsdusize;
 
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
   enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
 
-   /* xts du size 512B */
-   xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-   qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+   /* Set data unit size to cryptlen. Anything else causes
+* crypto engine to return back incorrect results.
+*/
+   qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
 static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-- 
2.25.1



[PATCH v7 05/11] crypto: qce: skcipher: Return error for zero length messages

2021-02-11 Thread Thara Gopinath
Crypto engine BAM dma does not support 0 length data. Return 0 early
if a zero length message is passed for transformation.

Signed-off-by: Thara Gopinath 
---

v5->v6:
- Return 0 for zero length messages instead of -EOPNOTSUPP in the
  cipher algorithms as pointed out by Eric Biggers.

 drivers/crypto/qce/skcipher.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 8aeb741ca5a3..6b3dc3a9797c 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -260,6 +261,10 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
 
+   /* CE does not handle 0 length messages */
+   if (!req->cryptlen)
+   return 0;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1



[PATCH v7 03/11] crypto: qce: skcipher: Return unsupported if key1 and key 2 are same for AES XTS algorithm

2021-02-11 Thread Thara Gopinath
Crypto engine does not support key1 = key2 for the AES XTS algorithm; the
operation hangs the engine. Return -ENOKEY in case key1 and key2 are the
same.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 19 ++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..12955dcd53dd 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -167,16 +167,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher 
*ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+   unsigned int __keylen;
int ret;
 
if (!key || !keylen)
return -EINVAL;
 
-   switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+   /*
+* AES XTS key1 = key2 not supported by crypto engine.
+* Revisit to request a fallback cipher in this case.
+*/
+   if (IS_XTS(flags)) {
+   __keylen = keylen >> 1;
+   if (!memcmp(key, key + __keylen, __keylen))
+   return -ENOKEY;
+   } else {
+   __keylen = keylen;
+   }
+
+   switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+   case AES_KEYSIZE_192:
+   break;
+   default:
+   return -EINVAL;
}
 
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-- 
2.25.1



[PATCH v7 04/11] crypto: qce: skcipher: Return unsupported if any three keys are same for DES3 algorithms

2021-02-11 Thread Thara Gopinath
Return -ENOKEY if any two of the three keys are the same for DES3
algorithms, since the CE does not support this and the operation causes
the engine to hang.

Signed-off-by: Thara Gopinath 
---

v6->v7:
- Fixed sparse warning in patch 4 as pointed out by Herbert Xu.
  This means the checking if any two keys are same for triple
  des algorithms has been reverted back to using conditional OR
  instead of using bitwise OR.

 drivers/crypto/qce/skcipher.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 12955dcd53dd..8aeb741ca5a3 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -221,12 +221,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, 
const u8 *key,
   unsigned int keylen)
 {
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+   u32 _key[6];
int err;
 
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
 
+   /*
+* The crypto engine does not support any two keys
+* being the same for triple des algorithms. The
+* verify_skcipher_des3_key does not check for all the
+* below conditions. Return -ENOKEY in case any two keys
+* are the same. Revisit to see if a fallback cipher
+* is needed to handle this condition.
+*/
+   memcpy(_key, key, DES3_EDE_KEY_SIZE);
+   if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+   !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+   !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+   return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
-- 
2.25.1



[PATCH v7 08/11] crypto: qce: skcipher: Improve the conditions for requesting AES fallback cipher

2021-02-11 Thread Thara Gopinath
The following are the conditions for requesting the AES fallback cipher.
- AES-192
- AES-XTS request with len <= 512 bytes (messages of length less than
  512 bytes are still allowed for all AES encryption algorithms other
  than AES XTS)
- AES-XTS request with len > QCE_SECTOR_SIZE that is not a multiple
  of it

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 11a2a30631af..2e6ab1d33a31 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -274,14 +274,19 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
if (!IS_ALIGNED(req->cryptlen, blocksize))
return -EINVAL;
 
-   /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
-* is not a multiple of it; pass such requests to the fallback
+   /*
+* Conditions for requesting a fallback cipher
+* AES-192 (not supported by crypto engine (CE))
+* AES-XTS request with len <= 512 byte (not recommended to use CE)
+* AES-XTS request with len > QCE_SECTOR_SIZE and
+* is not a multiple of it.(Revisit this condition to check if it is
+* needed in all versions of CE)
 */
if (IS_AES(rctx->flags) &&
-   (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
-(IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+   ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+   (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+   (req->cryptlen > QCE_SECTOR_SIZE &&
+   req->cryptlen % QCE_SECTOR_SIZE) {
skcipher_request_set_tfm(>fallback_req, ctx->fallback);
skcipher_request_set_callback(>fallback_req,
  req->base.flags,
-- 
2.25.1



[PATCH v7 02/11] crypto: qce: sha: Hold back a block of data to be transferred as part of final

2021-02-11 Thread Thara Gopinath
If the available data to transfer is exactly a multiple of block size, save
the last block to be transferred in qce_ahash_final (with the last block
bit set) if this is indeed the end of data stream. If not this saved block
will be transferred as part of next update. If this block is not held back
and if this is indeed the end of data stream, the digest obtained will be
wrong since qce_ahash_final will see that rctx->buflen is 0 and return
doing nothing which in turn means that a digest will not be copied to the
destination result buffer.  qce_ahash_final cannot be made to alter this
behavior and allowed to proceed if rctx->buflen is 0 because the crypto
engine BAM does not allow for zero length transfers.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/sha.c | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 7da562dca740..2813c9a27a6e 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -216,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
 
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+   /*
+* At this point, there is more than one block size of data.  If
+* the available data to transfer is exactly a multiple of block
+* size, save the last block to be transferred in qce_ahash_final
+* (with the last block bit set) if this is indeed the end of data
+* stream. If not this saved block will be transferred as part of
+* next update. If this block is not held back and if this is
+* indeed the end of data stream, the digest obtained will be wrong
+* since qce_ahash_final will see that rctx->buflen is 0 and return
+* doing nothing which in turn means that a digest will not be
+* copied to the destination result buffer.  qce_ahash_final cannot
+* be made to alter this behavior and allowed to proceed if
+* rctx->buflen is 0 because the crypto engine BAM does not allow
+* for zero length transfers.
+*/
+   if (!hash_later)
+   hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
-- 
2.25.1
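
As an illustration of the hold-back rule above, here is a minimal standalone
sketch of the update-path bookkeeping. The function name and the fixed 64-byte
block size are assumptions for the sketch; the driver keeps this state in
qce_sha_reqctx.

#include <stdio.h>

#define BLOCKSIZE 64u   /* SHA-1/SHA-256 block size */

/*
 * Given the total bytes available in this update (assumed > BLOCKSIZE;
 * smaller amounts are simply buffered), return how many bytes to hold back
 * for a later final()/next update. When the total is an exact multiple of
 * the block size, one full block is held back so final() always has data to
 * send with the "last block" bit set.
 */
static unsigned int bytes_to_hold_back(unsigned int total)
{
        unsigned int hash_later = total % BLOCKSIZE;

        if (!hash_later)
                hash_later = BLOCKSIZE;
        return hash_later;
}

int main(void)
{
        printf("%u %u %u\n",
               bytes_to_hold_back(65),    /* 1 */
               bytes_to_hold_back(128),   /* 64: exactly two blocks, keep one */
               bytes_to_hold_back(200));  /* 8 */
        return 0;
}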



[PATCH v7 01/11] crypto: qce: sha: Restore/save ahash state with custom struct in export/import

2021-02-11 Thread Thara Gopinath
Export and import interfaces save and restore partial transformation
states. The partial states were being stored and restored in struct
sha1_state for sha1/hmac(sha1) transformations and sha256_state for
sha256/hmac(sha256) transformations. This led to a number of corner cases
where improper state was being stored and restored. A few of the corner
cases that turned up during testing are:

- wrong byte_count restored if export/import is called twice without h/w
transaction in between
- wrong buflen restored back if the pending buffer
length is exactly the block size.
- wrong state restored if buffer length is 0.

To fix these issues, save and restore the partial transformation state
using the newly introduced qce_sha_saved_state struct. This ensures that
all the pieces required to properly restart the transformation are captured
and restored.

Signed-off-by: Thara Gopinath 
---

v4->v5:
- Fixed build warning/error in patch for wrong assignment of const
  pointer as reported by kernel test robot .
v1->v2:
- Introduced custom struct qce_sha_saved_state to store and
  restore partial sha transformation. v1 was re-using
  qce_sha_reqctx to save and restore partial states and this
  could lead to potential memcpy issues around pointer copying.


 drivers/crypto/qce/sha.c | 122 +++
 1 file changed, 34 insertions(+), 88 deletions(-)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..7da562dca740 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
 #include "core.h"
 #include "sha.h"
 
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING        64
-#define SHA_PADDING_MASK   (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+   u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+   u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+   __be32 byte_count[2];
+   unsigned int pending_buflen;
+   unsigned int flags;
+   u64 count;
+   bool first_blk;
+};
 
 static LIST_HEAD(ahash_algs);
 
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
 
 static int qce_ahash_export(struct ahash_request *req, void *out)
 {
-   struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-   unsigned long flags = rctx->flags;
-   unsigned int digestsize = crypto_ahash_digestsize(ahash);
-   unsigned int blocksize =
-   crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
-   if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
-   struct sha1_state *out_state = out;
-
-   out_state->count = rctx->count;
-   qce_cpu_to_be32p_array((__be32 *)out_state->state,
-  rctx->digest, digestsize);
-   memcpy(out_state->buffer, rctx->buf, blocksize);
-   } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
-   struct sha256_state *out_state = out;
-
-   out_state->count = rctx->count;
-   qce_cpu_to_be32p_array((__be32 *)out_state->state,
-  rctx->digest, digestsize);
-   memcpy(out_state->buf, rctx->buf, blocksize);
-   } else {
-   return -EINVAL;
-   }
-
-   return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
-const u32 *state, const u8 *buffer, bool hmac)
-{
-   struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-   struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-   unsigned int digestsize = crypto_ahash_digestsize(ahash);
-   unsigned int blocksize;
-   u64 count = in_count;
-
-   blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-   rctx->count = in_count;
-   memcpy(rctx->buf, buffer, blocksize);
-
-   if (in_count <= blocksize) {
-   rctx->first_blk = 1;
-   } else {
-   rctx->first_blk = 0;
-   /*
-* For HMAC, there is a hardware padding done when first block
-* is set. Therefore the byte_count must be incremened by 64
-* after the first block operation.
-*/
-   if (hmac)
-   count += SHA_PADDING;
-   }
+   struct qce_sha_saved_state *export_state = out;
 
-   rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
-   rctx->byte_count[1] = (__force __be32)(count >> 32);
-   qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
-  digestsize);
-   rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+   memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+   memcpy(export_st
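
The diff above is cut off in this archive; the same patch appears again in the
v6 posting further down. As a rough illustration of the approach, here is a
userspace-style sketch of exporting and importing a partial hash state through
a single flat struct. The struct and field names are modelled on
qce_sha_saved_state but are assumptions for this sketch, not the driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BLOCKSIZE  64
#define MAX_DIGESTSIZE 32

/* Hypothetical request context and flat saved-state struct. */
struct reqctx {
        uint8_t  buf[MAX_BLOCKSIZE];
        uint8_t  digest[MAX_DIGESTSIZE];
        uint32_t byte_count[2];
        unsigned int buflen;
        unsigned int flags;
        uint64_t count;
        int first_blk;
};

struct saved_state {
        uint8_t  pending_buf[MAX_BLOCKSIZE];
        uint8_t  partial_digest[MAX_DIGESTSIZE];
        uint32_t byte_count[2];
        unsigned int pending_buflen;
        unsigned int flags;
        uint64_t count;
        int first_blk;
};

/* Export copies every field needed to restart the transformation... */
static void export_state(const struct reqctx *rctx, struct saved_state *out)
{
        memcpy(out->pending_buf, rctx->buf, rctx->buflen);
        memcpy(out->partial_digest, rctx->digest, sizeof(rctx->digest));
        out->byte_count[0]  = rctx->byte_count[0];
        out->byte_count[1]  = rctx->byte_count[1];
        out->pending_buflen = rctx->buflen;
        out->flags          = rctx->flags;
        out->count          = rctx->count;
        out->first_blk      = rctx->first_blk;
}

/* ...and import restores them verbatim, with no per-algorithm guessing. */
static void import_state(struct reqctx *rctx, const struct saved_state *in)
{
        memcpy(rctx->buf, in->pending_buf, in->pending_buflen);
        memcpy(rctx->digest, in->partial_digest, sizeof(rctx->digest));
        rctx->byte_count[0] = in->byte_count[0];
        rctx->byte_count[1] = in->byte_count[1];
        rctx->buflen        = in->pending_buflen;
        rctx->flags         = in->flags;
        rctx->count         = in->count;
        rctx->first_blk     = in->first_blk;
}

int main(void)
{
        struct reqctx a = { .buflen = 3, .flags = 1, .count = 3, .first_blk = 1 };
        struct saved_state s;
        struct reqctx b;

        memcpy(a.buf, "abc", 3);
        export_state(&a, &s);
        import_state(&b, &s);
        printf("%u %u %llu %d\n", b.buflen, b.flags,
               (unsigned long long)b.count, b.first_blk);
        return 0;
}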

[PATCH v7 07/11] crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)

2021-02-11 Thread Thara Gopinath
ECB transformations do not have an IV and hence set the ivsize to 0 for
ecb(aes).

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index c2f0469ffb22..11a2a30631af 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -353,7 +353,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name   = "ecb(aes)",
.drv_name   = "ecb-aes-qce",
.blocksize  = AES_BLOCK_SIZE,
-   .ivsize = AES_BLOCK_SIZE,
+   .ivsize = 0,
.min_keysize= AES_MIN_KEY_SIZE,
.max_keysize= AES_MAX_KEY_SIZE,
},
-- 
2.25.1



[PATCH v7 00/11] Regression fixes/clean ups in the Qualcomm crypto engine driver

2021-02-11 Thread Thara Gopinath
This patch series is a result of running kernel crypto fuzz tests (by
enabling CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) on the transformations
currently supported via the Qualcomm crypto engine on sdm845.  The first
nine patches are fixes for various regressions found during testing. The
last two patches are minor clean ups of unused variable and parameters.

v6->v7:
- Fixed sparse warning in patch 4 as pointed out by Herbert Xu.
  This means the check for any two keys being the same for triple
  DES algorithms has been reverted to using conditional OR
  instead of bitwise OR.
- Rebased to 5.11-rc7.

v5->v6:
- Return 0 for zero length messages instead of -EOPNOTSUPP in the
  cipher algorithms as pointed out by Eric Biggers.
- Remove the wrong TODO in patch 6 which implied that AES CBC can
  do partial block sizes when it is actually CTS mode that can as
  pointed out by Eric Biggers.

v4->v5:
- Fixed build warning/error in patch for wrong assignment of const
  pointer as reported by kernel test robot .
- Rebased to 5.11-rc6.
v3->v4:
- Fixed the bug where only two bytes of byte_count were getting
  saved and restored instead of all eight bytes. Thanks Bjorn for
  catching this.
- Split patch 3 "Fix regressions found during fuzz testing" into
  6 patches as requested by Bjorn.
- Dropped crypto from all subject headers.
- Rebased to 5.11-rc5
v2->v3:
- Made the comparison between keys to check if any two keys are
  same for triple des algorithms constant-time as per
  Nym Seddon's suggestion.
- Rebased to 5.11-rc4.
v1->v2:
- Introduced custom struct qce_sha_saved_state to store and restore
  partial sha transformation.
- Rebased to 5.11-rc3.

Thara Gopinath (11):
  crypto: qce: sha: Restore/save ahash state with custom struct in
export/import
  crypto: qce: sha: Hold back a block of data to be transferred as part
of final
  crypto: qce: skcipher: Return unsupported if key1 and key 2 are same
for AES XTS algorithm
  crypto: qce: skcipher: Return unsupported if any three keys are same
for DES3 algorithms
  crypto: qce: skcipher: Return error for zero length messages
  crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC
algorithms)
  crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)
  crypto: qce: skcipher: Improve the conditions for requesting AES
fallback cipher
  crypto: qce: common: Set data unit size to message length for AES XTS
transformation
  crypto: qce: Remover src_tbl from qce_cipher_reqctx
  crypto: qce: Remove totallen and offset in qce_start

 drivers/crypto/qce/cipher.h   |   1 -
 drivers/crypto/qce/common.c   |  25 +++---
 drivers/crypto/qce/common.h   |   3 +-
 drivers/crypto/qce/sha.c  | 143 +-
 drivers/crypto/qce/skcipher.c |  69 +---
 5 files changed, 126 insertions(+), 115 deletions(-)

-- 
2.25.1



[PATCH v6 11/11] crypto: qce: Remove totallen and offset in qce_start

2021-02-07 Thread Thara Gopinath
totallen is used to get the size of the data to be transformed.
This is also available via nbytes or cryptlen in the qce_sha_reqctx
and qce_cipher_ctx. Similarly, offset conveys nothing for the supported
encryption and authentication transformations and is always 0.
Remove these two redundant parameters in qce_start.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c   | 17 +++--
 drivers/crypto/qce/common.h   |  3 +--
 drivers/crypto/qce/sha.c  |  2 +-
 drivers/crypto/qce/skcipher.c |  2 +-
 4 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index f7bc701a4aa2..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
 }
 
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
-   u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -306,8 +305,7 @@ static void qce_xtskey(struct qce_device *qce, const u8 
*enckey,
qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
 {
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
 
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
-   qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+   qce_write(qce, REG_ENCR_SEG_START, 0);
 
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
 
-   qce_write(qce, REG_SEG_SIZE, totallen);
+   qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
 
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
 }
 #endif
 
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
 {
switch (type) {
 #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
-   return qce_setup_regs_skcipher(async_req, totallen, offset);
+   return qce_setup_regs_skcipher(async_req);
 #endif
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
-   return qce_setup_regs_ahash(async_req, totallen, offset);
+   return qce_setup_regs_ahash(async_req);
 #endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
 void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
 int qce_check_status(struct qce_device *qce, u32 *status);
 void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 
*step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
 
 #endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 2813c9a27a6e..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -113,7 +113,7 @@ static int qce_ahash_async_req_handle(struct 
crypto_async_request *async_req)
 
qce_dma_issue_pending(&qce->dma);
 
-   ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+   ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
 
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 66a226d1205e..9fec5bdc29ec 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -144,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request 
*async_req)
 
qce_dma_issue_pending(&qce->dma);
 
-   ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+   ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
 
-- 
2.25.1



[PATCH v6 09/11] crypto: qce: common: Set data unit size to message length for AES XTS transformation

2021-02-07 Thread Thara Gopinath
Set the register REG_ENCR_XTS_DU_SIZE to cryptlen for AES XTS
transformation. Anything else causes the engine to return
wrong results.

Acked-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..f7bc701a4aa2 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -295,15 +295,15 @@ static void qce_xtskey(struct qce_device *qce, const u8 
*enckey,
 {
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-   unsigned int xtsdusize;
 
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
   enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
 
-   /* xts du size 512B */
-   xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-   qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+   /* Set data unit size to cryptlen. Anything else causes
+* crypto engine to return back incorrect results.
+*/
+   qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
 static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-- 
2.25.1



[PATCH v6 10/11] crypto: qce: Remover src_tbl from qce_cipher_reqctx

2021-02-07 Thread Thara Gopinath
src_tbl is unused, so remove it from struct qce_cipher_reqctx.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/cipher.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
-   struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req;   // keep at the end
-- 
2.25.1



[PATCH v6 06/11] crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC algorithms)

2021-02-07 Thread Thara Gopinath
ECB/CBC encryption/decryption requires the data to be blocksize aligned.
Crypto engine hangs on non-block sized operations for these algorithms.
Return -EINVAL if the data size is not blocksize aligned for these
algorithms.

Signed-off-by: Thara Gopinath 
---

v5->v6:
- Remove the wrong TODO which implied that AES CBC can do partial
  block sizes when it is actually CTS mode that can as pointed
  out by Eric Biggers.

 drivers/crypto/qce/skcipher.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 96af40c2c8f3..d24d96ed5be9 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -254,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, 
int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+   unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
 
@@ -265,6 +266,14 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
if (!req->cryptlen)
return 0;
 
+   /*
+* ECB and CBC algorithms require message lengths to be
+* multiples of block size.
+*/
+   if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+   if (!IS_ALIGNED(req->cryptlen, blocksize))
+   return -EINVAL;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1
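
For reference, the alignment test used above reduces to a power-of-two modulus
check (IS_ALIGNED(x, a) expands to ((x) & ((a) - 1)) == 0 for a power-of-two
alignment). A minimal standalone sketch with a hypothetical helper name, not
the driver code:

#include <stdbool.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16u

/* ECB and CBC require cryptlen to be a whole number of cipher blocks. */
static bool cryptlen_ok_for_ecb_cbc(unsigned int cryptlen)
{
        return (cryptlen & (AES_BLOCK_SIZE - 1)) == 0;
}

int main(void)
{
        printf("%d %d\n",
               cryptlen_ok_for_ecb_cbc(64),   /* 1: four full blocks */
               cryptlen_ok_for_ecb_cbc(70));  /* 0: rejected with -EINVAL */
        return 0;
}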



[PATCH v6 08/11] crypto: qce: skcipher: Improve the conditions for requesting AES fallback cipher

2021-02-07 Thread Thara Gopinath
The following are the conditions for requesting AES fallback cipher.
-  AES-192
- AES-XTS request with len <= 512 byte (Allow messages of length
  less than 512 bytes for all other AES encryption algorithms other
  than AES XTS)
- AES-XTS request with len > QCE_SECTOR_SIZE and is not a multiple
  of it

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 64f661d7335f..66a226d1205e 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -274,14 +274,19 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
if (!IS_ALIGNED(req->cryptlen, blocksize))
return -EINVAL;
 
-   /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
-* is not a multiple of it; pass such requests to the fallback
+   /*
+* Conditions for requesting a fallback cipher
+* AES-192 (not supported by crypto engine (CE))
+* AES-XTS request with len <= 512 byte (not recommended to use CE)
+* AES-XTS request with len > QCE_SECTOR_SIZE and
+* is not a multiple of it.(Revisit this condition to check if it is
+* needed in all versions of CE)
 */
if (IS_AES(rctx->flags) &&
-   (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
-(IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+   ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+   (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+   (req->cryptlen > QCE_SECTOR_SIZE &&
+   req->cryptlen % QCE_SECTOR_SIZE))))) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
  req->base.flags,
-- 
2.25.1



[PATCH v6 07/11] crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)

2021-02-07 Thread Thara Gopinath
ECB transformations do not have an IV and hence set the ivsize to 0 for
ecb(aes).

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index d24d96ed5be9..64f661d7335f 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -353,7 +353,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name   = "ecb(aes)",
.drv_name   = "ecb-aes-qce",
.blocksize  = AES_BLOCK_SIZE,
-   .ivsize = AES_BLOCK_SIZE,
+   .ivsize = 0,
.min_keysize= AES_MIN_KEY_SIZE,
.max_keysize= AES_MAX_KEY_SIZE,
},
-- 
2.25.1



[PATCH v6 05/11] crypto: qce: skcipher: Return error for zero length messages

2021-02-07 Thread Thara Gopinath
Crypto engine BAM dma does not support 0 length data. Return unsupported
if zero length messages are passed for transformation.

Signed-off-by: Thara Gopinath 
---

v5->v6:
- Return 0 for zero length messages instead of -EOPNOTSUPP in the
  cipher algorithms as pointed out by Eric Biggers.

 drivers/crypto/qce/skcipher.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index de1f37ed4ee6..96af40c2c8f3 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -260,6 +261,10 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
 
+   /* CE does not handle 0 length messages */
+   if (!req->cryptlen)
+   return 0;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1
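
Note that this v6 diff returns 0 for empty input, as requested in the v5
review: an empty message has nothing to transform, so reporting success
without touching the hardware is the right behaviour. A minimal standalone
sketch of that early return (hypothetical function name, not the driver code):

#include <stdio.h>

/*
 * Hypothetical shape of the crypt entry point: bail out successfully before
 * touching the hardware when there is no data, because the BAM DMA cannot
 * express a zero-length transfer.
 */
static int crypt(unsigned int cryptlen)
{
        if (!cryptlen)
                return 0;       /* empty in, empty out: success */

        /* ...set up scatterlists and hand the request to the engine... */
        return 0;
}

int main(void)
{
        printf("%d\n", crypt(0));       /* 0, without any hardware access */
        return 0;
}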



[PATCH v6 04/11] crypto: qce: skcipher: Return unsupported if any three keys are same for DES3 algorithms

2021-02-07 Thread Thara Gopinath
Return unsupported if any two of the three keys are the same for DES3
algorithms, since the CE does not support this and the operation causes
the engine to hang.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 12955dcd53dd..de1f37ed4ee6 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -221,12 +221,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, 
const u8 *key,
   unsigned int keylen)
 {
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+   u32 _key[6];
int err;
 
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
 
+   /*
+* The crypto engine does not support any two keys
+* being the same for triple des algorithms. The
+* verify_skcipher_des3_key does not check for all the
+* below conditions. Return -ENOKEY in case any two keys
+* are the same. Revisit to see if a fallback cipher
+* is needed to handle this condition.
+*/
+   memcpy(_key, key, DES3_EDE_KEY_SIZE);
+   if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) |
+   !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) |
+   !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+   return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
-- 
2.25.1
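
The pairwise key check above can be exercised on its own; the sketch below
mirrors the XOR/OR comparison with hypothetical names and is an illustration,
not the driver code. As the later v6->v7 changelog notes, this v6 posting
combines the three terms with bitwise '|' to keep the comparison constant
time, which v7 reverts to '||' to silence a sparse warning.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DES3_EDE_KEY_SIZE 24    /* three 8-byte DES keys */

/*
 * Treat the 24-byte key as three 64-bit keys held as u32 pairs and test
 * k1==k2, k2==k3 and k1==k3 with XOR/OR, so the work done does not depend
 * on where (or whether) the keys differ.
 */
static bool any_two_des3_keys_equal(const uint8_t *key)
{
        uint32_t k[6];

        memcpy(k, key, DES3_EDE_KEY_SIZE);
        return !((k[0] ^ k[2]) | (k[1] ^ k[3])) |
               !((k[2] ^ k[4]) | (k[3] ^ k[5])) |
               !((k[0] ^ k[4]) | (k[1] ^ k[5]));
}

int main(void)
{
        uint8_t two_key[DES3_EDE_KEY_SIZE] = {0};
        uint8_t three_key[DES3_EDE_KEY_SIZE];
        int i;

        memset(two_key + 8, 0xaa, 8);           /* k1 == k3: rejected */
        for (i = 0; i < DES3_EDE_KEY_SIZE; i++)
                three_key[i] = (uint8_t)i;      /* all three keys distinct */

        printf("%d %d\n",
               any_two_des3_keys_equal(two_key),    /* 1 */
               any_two_des3_keys_equal(three_key)); /* 0 */
        return 0;
}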



[PATCH v6 03/11] crypto: qce: skcipher: Return unsupported if key1 and key 2 are same for AES XTS algorithm

2021-02-07 Thread Thara Gopinath
Crypto engine does not support key1 = key2 for AES XTS algorithm; the
operation hangs the engine. Return -ENOKEY in case key1 and key2 are the
same.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 19 ++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..12955dcd53dd 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -167,16 +167,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher 
*ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+   unsigned int __keylen;
int ret;
 
if (!key || !keylen)
return -EINVAL;
 
-   switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+   /*
+* AES XTS key1 = key2 not supported by crypto engine.
+* Revisit to request a fallback cipher in this case.
+*/
+   if (IS_XTS(flags)) {
+   __keylen = keylen >> 1;
+   if (!memcmp(key, key + __keylen, __keylen))
+   return -ENOKEY;
+   } else {
+   __keylen = keylen;
+   }
+
+   switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+   case AES_KEYSIZE_192:
+   break;
+   default:
+   return -EINVAL;
}
 
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-- 
2.25.1
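
A standalone sketch of the XTS half-key comparison above (hypothetical helper
name, not the driver code). An XTS key buffer is the concatenation
key1 || key2, and the engine cannot be given key1 == key2:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reject the key when the two halves are identical. */
static bool xts_halves_equal(const uint8_t *key, unsigned int keylen)
{
        unsigned int half = keylen / 2;

        return memcmp(key, key + half, half) == 0;
}

int main(void)
{
        uint8_t same[64] = {0};         /* key1 == key2 (all zero) */
        uint8_t diff[64] = {0};

        diff[40] = 1;                   /* make key2 differ from key1 */
        printf("%d %d\n",
               xts_halves_equal(same, sizeof(same)),   /* 1: rejected with -ENOKEY */
               xts_halves_equal(diff, sizeof(diff)));  /* 0: accepted */
        return 0;
}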



[PATCH v6 01/11] crypto: qce: sha: Restore/save ahash state with custom struct in export/import

2021-02-07 Thread Thara Gopinath
Export and import interfaces save and restore partial transformation
states. The partial states were being stored and restored in struct
sha1_state for sha1/hmac(sha1) transformations and sha256_state for
sha256/hmac(sha256) transformations. This led to a number of corner cases
where improper state was being stored and restored. A few of the corner
cases that turned up during testing are:

- wrong byte_count restored if export/import is called twice without h/w
transaction in between
- wrong buflen restored back if the pending buffer
length is exactly the block size.
- wrong state restored if buffer length is 0.

To fix these issues, save and restore the partial transformation state
using the newly introduced qce_sha_saved_state struct. This ensures that
all the pieces required to properly restart the transformation are captured
and restored.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/sha.c | 122 +++
 1 file changed, 34 insertions(+), 88 deletions(-)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..7da562dca740 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
 #include "core.h"
 #include "sha.h"
 
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING        64
-#define SHA_PADDING_MASK   (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+   u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+   u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+   __be32 byte_count[2];
+   unsigned int pending_buflen;
+   unsigned int flags;
+   u64 count;
+   bool first_blk;
+};
 
 static LIST_HEAD(ahash_algs);
 
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
 
 static int qce_ahash_export(struct ahash_request *req, void *out)
 {
-   struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-   unsigned long flags = rctx->flags;
-   unsigned int digestsize = crypto_ahash_digestsize(ahash);
-   unsigned int blocksize =
-   crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
-   if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
-   struct sha1_state *out_state = out;
-
-   out_state->count = rctx->count;
-   qce_cpu_to_be32p_array((__be32 *)out_state->state,
-  rctx->digest, digestsize);
-   memcpy(out_state->buffer, rctx->buf, blocksize);
-   } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
-   struct sha256_state *out_state = out;
-
-   out_state->count = rctx->count;
-   qce_cpu_to_be32p_array((__be32 *)out_state->state,
-  rctx->digest, digestsize);
-   memcpy(out_state->buf, rctx->buf, blocksize);
-   } else {
-   return -EINVAL;
-   }
-
-   return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
-const u32 *state, const u8 *buffer, bool hmac)
-{
-   struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-   struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-   unsigned int digestsize = crypto_ahash_digestsize(ahash);
-   unsigned int blocksize;
-   u64 count = in_count;
-
-   blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-   rctx->count = in_count;
-   memcpy(rctx->buf, buffer, blocksize);
-
-   if (in_count <= blocksize) {
-   rctx->first_blk = 1;
-   } else {
-   rctx->first_blk = 0;
-   /*
-* For HMAC, there is a hardware padding done when first block
-* is set. Therefore the byte_count must be incremened by 64
-* after the first block operation.
-*/
-   if (hmac)
-   count += SHA_PADDING;
-   }
+   struct qce_sha_saved_state *export_state = out;
 
-   rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
-   rctx->byte_count[1] = (__force __be32)(count >> 32);
-   qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
-  digestsize);
-   rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+   memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+   memcpy(export_state->partial_digest, rctx->digest, 
sizeof(rctx->digest));
+   export_state->byte_count[0] = rctx->byte_count[0];
+   export_state->byte_count[1] = rctx->byte_count[1];
+   export_state->pending_buflen = rctx->buflen;
+   export_state->count = rctx->count;
+   export_state->first_blk = rctx->first_blk;
+   export_state->flags = rctx->flags;
 

[PATCH v6 02/11] crypto: qce: sha: Hold back a block of data to be transferred as part of final

2021-02-07 Thread Thara Gopinath
If the available data to transfer is exactly a multiple of block size, save
the last block to be transferred in qce_ahash_final (with the last block
bit set) if this is indeed the end of the data stream. If not, this saved block
will be transferred as part of the next update. If this block is not held back
and if this is indeed the end of data stream, the digest obtained will be
wrong since qce_ahash_final will see that rctx->buflen is 0 and return
doing nothing which in turn means that a digest will not be copied to the
destination result buffer.  qce_ahash_final cannot be made to alter this
behavior and allowed to proceed if rctx->buflen is 0 because the crypto
engine BAM does not allow for zero length transfers.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/sha.c | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 7da562dca740..2813c9a27a6e 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -216,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
 
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+   /*
+* At this point, there is more than one block size of data.  If
+* the available data to transfer is exactly a multiple of block
+* size, save the last block to be transferred in qce_ahash_final
+* (with the last block bit set) if this is indeed the end of data
+* stream. If not this saved block will be transferred as part of
+* next update. If this block is not held back and if this is
+* indeed the end of data stream, the digest obtained will be wrong
+* since qce_ahash_final will see that rctx->buflen is 0 and return
+* doing nothing which in turn means that a digest will not be
+* copied to the destination result buffer.  qce_ahash_final cannot
+* be made to alter this behavior and allowed to proceed if
+* rctx->buflen is 0 because the crypto engine BAM does not allow
+* for zero length transfers.
+*/
+   if (!hash_later)
+   hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
-- 
2.25.1



[PATCH v6 00/11] Regression fixes/clean ups in the Qualcomm crypto engine driver

2021-02-07 Thread Thara Gopinath
This patch series is a result of running kernel crypto fuzz tests (by
enabling CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) on the transformations
currently supported via the Qualcomm crypto engine on sdm845.  The first
nine patches are fixes for various regressions found during testing. The
last two patches are minor clean ups of unused variable and parameters.

v5->v6:
- Return 0 for zero length messages instead of -EOPNOTSUPP in the
  cipher algorithms as pointed out by Eric Biggers.
- Remove the wrong TODO in patch 6 which implied that AES CBC can
  do partial block sizes when it is actually CTS mode that can as
  pointed out by Eric Biggers.

v4->v5:
- Fixed build warning/error in patch for wrong assignment of const
  pointer as reported by kernel test robot .
- Rebased to 5.11-rc6.
v3->v4:
- Fixed the bug where only two bytes of byte_count were getting
  saved and restored instead of all eight bytes. Thanks Bjorn for
  catching this.
- Split patch 3 "Fix regressions found during fuzz testing" into
  6 patches as requested by Bjorn.
- Dropped crypto from all subject headers.
- Rebased to 5.11-rc5
v2->v3:
- Made the comparison between keys to check if any two keys are
  same for triple des algorithms constant-time as per
  Nym Seddon's suggestion.
- Rebased to 5.11-rc4.
v1->v2:
- Introduced custom struct qce_sha_saved_state to store and restore
  partial sha transformation.
    - Rebased to 5.11-rc3.

Thara Gopinath (11):
  crypto: qce: sha: Restore/save ahash state with custom struct in
export/import
  crypto: qce: sha: Hold back a block of data to be transferred as part
of final
  crypto: qce: skcipher: Return unsupported if key1 and key 2 are same
for AES XTS algorithm
  crypto: qce: skcipher: Return unsupported if any three keys are same
for DES3 algorithms
  crypto: qce: skcipher: Return error for zero length messages
  crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC
algorithms)
  crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)
  crypto: qce: skcipher: Improve the conditions for requesting AES
fallback cipher
  crypto: qce: common: Set data unit size to message length for AES XTS
transformation
  crypto: qce: Remover src_tbl from qce_cipher_reqctx
  crypto: qce: Remove totallen and offset in qce_start

 drivers/crypto/qce/cipher.h   |   1 -
 drivers/crypto/qce/common.c   |  25 +++---
 drivers/crypto/qce/common.h   |   3 +-
 drivers/crypto/qce/sha.c  | 143 +-
 drivers/crypto/qce/skcipher.c |  69 +---
 5 files changed, 126 insertions(+), 115 deletions(-)

-- 
2.25.1



Re: [PATCH v5 05/11] crypto: qce: skcipher: Return error for zero length messages

2021-02-05 Thread Thara Gopinath




On 2/4/21 7:26 PM, Eric Biggers wrote:

On Thu, Feb 04, 2021 at 07:09:53PM -0500, Thara Gopinath wrote:

@@ -260,6 +261,10 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
+   /* CE does not handle 0 length messages */
+   if (!req->cryptlen)
+   return -EOPNOTSUPP;
+


For the algorithms in question, the correct behavior is to return 0.


What do you mean? The driver should return a 0 ?


Ok. I will re-spin the series once more with this change..



Yes, there is nothing to do for empty inputs, so just return 0 (success).


Aren't the tests catching that difference?


I was anyways planning on sending an email to the list with these queries.
But since you asked,  these are my observations with fuzz testing which I
have been doing quite a bit now (I am also working on adding a few qualcomm
AEAD algorithms support in mainline).

- if the generic algorithm supports 0 length messages and the transformation
I am testing does not, the test framework throws an error and stops.
- key support mismatch between the generic algorithm vs my algorithm /engine
also does the same thing. For example, the Qualcomm CE engine does not support
any three keys being the same for triple DES algorithms, whereas a two-key 3DES
is a valid scenario for the generic algorithm (k1=k3). Another example is the hardware
engine not supporting AES192.

How are these scenarios usually handled ? Why not allow the test framework
to proceed with the testing if the algorithm does not support a particular
scenario ?


Omitting support for certain inputs isn't allowed.  Anyone in the kernel who
wants to use a particular algorithm could get this driver for it, and if they
happen to use inputs which the driver decided not to support, things will break.


Ya sounds reasonable.



The way that drivers handle this is to use a fallback cipher for inputs they
don't support.


Ok. So I will add this to my todo and make sure to have fallback ciphers 
for all the non-supported inputs. I will send this as a separate series 
and not this one.


In this case, though, not supporting 0 length messages for encryption is 
valid. I don't think I have to have a fallback for this. I could have 
sworn that the test framework throws up an error for this. But I have 
been testing a lot and maybe I am just confused. I will double check this.





- Eric



--
Warm Regards
Thara


Re: [PATCH v5 06/11] crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC algorithms)

2021-02-04 Thread Thara Gopinath




On 2/4/21 5:50 PM, Eric Biggers wrote:

On Thu, Feb 04, 2021 at 04:43:54PM -0500, Thara Gopinath wrote:

+   /*
+* ECB and CBC algorithms require message lengths to be
+* multiples of block size.
+* TODO: The spec says AES CBC mode for certain versions
+* of crypto engine can handle partial blocks as well.
+* Test and enable such messages.
+*/
+   if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+   if (!IS_ALIGNED(req->cryptlen, blocksize))
+   return -EINVAL;


CBC by definition only operates on full blocks, so the TODO doesn't make sense.
Is the partial block support really CTS-CBC?


Ya you are right. It should be CTS-CBC and not AES CBC. Though the spec 
is quite fuzzy about this part.


I can remove the comment and spin the next version or just leave it 
there for now and remove it later.




- Eric



--
Warm Regards
Thara


Re: [PATCH v5 05/11] crypto: qce: skcipher: Return error for zero length messages

2021-02-04 Thread Thara Gopinath

Hi Eric,

On 2/4/21 5:48 PM, Eric Biggers wrote:

On Thu, Feb 04, 2021 at 04:43:53PM -0500, Thara Gopinath wrote:

Crypto engine BAM dma does not support 0 length data. Return unsupported
if zero length messages are passed for transformation.

Signed-off-by: Thara Gopinath 
---
  drivers/crypto/qce/skcipher.c | 5 +
  1 file changed, 5 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index de1f37ed4ee6..331b3c3a5b59 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
@@ -260,6 +261,10 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
  
+	/* CE does not handle 0 length messages */

+   if (!req->cryptlen)
+   return -EOPNOTSUPP;
+


For the algorithms in question, the correct behavior is to return 0.


What do you mean? The driver should return a 0 ?



Aren't the tests catching that difference?


I was anyways planning on sending an email to the list with these 
queries. But since you asked,  these are my observations with fuzz 
testing which I have been doing quite a bit now (I am also working on 
adding a few qualcomm AEAD algorithms support in mainline).


- if the generic algorithm supports 0 length messages and the 
transformation I am testing does not, the test framework throws an error 
and stops.
- key support mismatch between the generic algorithm vs my algorithm 
/engine also does the same thing. For example, the Qualcomm CE engine does not 
support any three keys being the same for triple DES algorithms, whereas a 
two-key 3DES is a valid scenario for the generic algorithm (k1=k3). Another 
example is hardware engine not supporting AES192.


How are these scenarios usually handled ? Why not allow the test 
framework to proceed with the testing if the algorithm does not support 
a particular scenario ?




- Eric


--
Warm Regards
Thara


[PATCH v5 04/11] crypto: qce: skcipher: Return unsupported if any three keys are same for DES3 algorithms

2021-02-04 Thread Thara Gopinath
Return unsupported if any two of the three keys are the same for DES3
algorithms, since the CE does not support this and the operation causes
the engine to hang.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 12955dcd53dd..de1f37ed4ee6 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -221,12 +221,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, 
const u8 *key,
   unsigned int keylen)
 {
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+   u32 _key[6];
int err;
 
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
 
+   /*
+* The crypto engine does not support any two keys
+* being the same for triple des algorithms. The
+* verify_skcipher_des3_key does not check for all the
+* below conditions. Return -ENOKEY in case any two keys
+* are the same. Revisit to see if a fallback cipher
+* is needed to handle this condition.
+*/
+   memcpy(_key, key, DES3_EDE_KEY_SIZE);
+   if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) |
+   !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) |
+   !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+   return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
-- 
2.25.1



Re: [PATCH v4 01/11] crypto: qce: sha: Restore/save ahash state with custom struct in export/import

2021-02-04 Thread Thara Gopinath




On 2/3/21 9:42 PM, Herbert Xu wrote:

On Thu, Feb 04, 2021 at 04:56:17AM +0800, kernel test robot wrote:


Thank you for the patch! Yet something to improve:


Please fix this before you resubmit again.


Hi Herbert.

Sorry about that. I have send the fix.



Thanks,



--
Warm Regards
Thara


[PATCH v5 08/11] crypto: qce: skcipher: Improve the conditions for requesting AES fallback cipher

2021-02-04 Thread Thara Gopinath
The following are the conditions for requesting AES fallback cipher.
-  AES-192
- AES-XTS request with len <= 512 byte (Allow messages of length
  less than 512 bytes for all other AES encryption algorithms other
  than AES XTS)
- AES-XTS request with len > QCE_SECTOR_SIZE and is not a multiple
  of it

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 10e85b1fc0fd..8599250946b7 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -277,14 +277,19 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
if (!IS_ALIGNED(req->cryptlen, blocksize))
return -EINVAL;
 
-   /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
-* is not a multiple of it; pass such requests to the fallback
+   /*
+* Conditions for requesting a fallback cipher
+* AES-192 (not supported by crypto engine (CE))
+* AES-XTS request with len <= 512 byte (not recommended to use CE)
+* AES-XTS request with len > QCE_SECTOR_SIZE and
+* is not a multiple of it.(Revisit this condition to check if it is
+* needed in all versions of CE)
 */
if (IS_AES(rctx->flags) &&
-   (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
-(IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+   ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+   (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+   (req->cryptlen > QCE_SECTOR_SIZE &&
+   req->cryptlen % QCE_SECTOR_SIZE))))) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
  req->base.flags,
-- 
2.25.1



[PATCH v5 06/11] crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC algorithms)

2021-02-04 Thread Thara Gopinath
ECB/CBC encryption/decryption requires the data to be blocksize aligned.
Crypto engine hangs on non-block sized operations for these algorithms.
Return -EINVAL if the data size is not blocksize aligned for these
algorithms.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 331b3c3a5b59..28bea9584c33 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -254,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, 
int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+   unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
 
@@ -265,6 +266,17 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
if (!req->cryptlen)
return -EOPNOTSUPP;
 
+   /*
+* ECB and CBC algorithms require message lengths to be
+* multiples of block size.
+* TODO: The spec says AES CBC mode for certain versions
+* of crypto engine can handle partial blocks as well.
+* Test and enable such messages.
+*/
+   if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+   if (!IS_ALIGNED(req->cryptlen, blocksize))
+   return -EINVAL;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1



[PATCH v5 11/11] crypto: qce: Remove totallen and offset in qce_start

2021-02-04 Thread Thara Gopinath
totallen is used to get the size of the data to be transformed.
This is also available via nbytes or cryptlen in the qce_sha_reqctx
and qce_cipher_ctx. Similarly, offset conveys nothing for the supported
encryption and authentication transformations and is always 0.
Remove these two redundant parameters in qce_start.

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c   | 17 +++--
 drivers/crypto/qce/common.h   |  3 +--
 drivers/crypto/qce/sha.c  |  2 +-
 drivers/crypto/qce/skcipher.c |  2 +-
 4 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index f7bc701a4aa2..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
 }
 
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
-   u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -306,8 +305,7 @@ static void qce_xtskey(struct qce_device *qce, const u8 
*enckey,
qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
 {
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
 
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
-   qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+   qce_write(qce, REG_ENCR_SEG_START, 0);
 
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
 
-   qce_write(qce, REG_SEG_SIZE, totallen);
+   qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
 
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct 
crypto_async_request *async_req,
 }
 #endif
 
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
 {
switch (type) {
 #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
-   return qce_setup_regs_skcipher(async_req, totallen, offset);
+   return qce_setup_regs_skcipher(async_req);
 #endif
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
-   return qce_setup_regs_ahash(async_req, totallen, offset);
+   return qce_setup_regs_ahash(async_req);
 #endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
 void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
 int qce_check_status(struct qce_device *qce, u32 *status);
 void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 
*step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
 
 #endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 2813c9a27a6e..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -113,7 +113,7 @@ static int qce_ahash_async_req_handle(struct 
crypto_async_request *async_req)
 
qce_dma_issue_pending(&qce->dma);
 
-   ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+   ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
 
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 8599250946b7..3fc0b263d498 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -144,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request 
*async_req)
 
qce_dma_issue_pending(&qce->dma);
 
-   ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+   ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
 
-- 
2.25.1



[PATCH v5 10/11] crypto: qce: Remover src_tbl from qce_cipher_reqctx

2021-02-04 Thread Thara Gopinath
src_tbl is unused, so remove it from struct qce_cipher_reqctx.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/cipher.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
-   struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req;   // keep at the end
-- 
2.25.1



[PATCH v5 09/11] crypto: qce: common: Set data unit size to message length for AES XTS transformation

2021-02-04 Thread Thara Gopinath
Set the register REG_ENCR_XTS_DU_SIZE to cryptlen for AES XTS
transformation. Anything else causes the engine to return
wrong results.

Acked-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/common.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..f7bc701a4aa2 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -295,15 +295,15 @@ static void qce_xtskey(struct qce_device *qce, const u8 
*enckey,
 {
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-   unsigned int xtsdusize;
 
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
   enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
 
-   /* xts du size 512B */
-   xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-   qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+   /* Set data unit size to cryptlen. Anything else causes
+* crypto engine to return back incorrect results.
+*/
+   qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
 static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-- 
2.25.1



[PATCH v5 00/11] Regression fixes/clean ups in the Qualcomm crypto engine driver

2021-02-04 Thread Thara Gopinath
This patch series is a result of running kernel crypto fuzz tests (by
enabling CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) on the transformations
currently supported via the Qualcomm crypto engine on sdm845.  The first
nine patches are fixes for various regressions found during testing. The
last two patches are minor clean ups of unused variable and parameters.

v4->v5:
- Fixed build warning/error in patch for wrong assignment of const
  pointer as reported by kernel test robot .
- Rebased to 5.11-rc6.
v3->v4:
- Fixed the bug where only two bytes of byte_count were getting
  saved and restored instead of all eight bytes. Thanks Bjorn for
  catching this.
- Split patch 3 "Fix regressions found during fuzz testing" into
  6 patches as requested by Bjorn.
- Dropped crypto from all subject headers.
- Rebased to 5.11-rc5
v2->v3:
- Made the comparison between keys to check if any two keys are
  same for triple des algorithms constant-time as per
  Nym Seddon's suggestion.
- Rebased to 5.11-rc4.
v1->v2:
- Introduced custom struct qce_sha_saved_state to store and restore
  partial sha transformation.
- Rebased to 5.11-rc3.

Thara Gopinath (11):
  crypto: qce: sha: Restore/save ahash state with custom struct in
export/import
  crypto: qce: sha: Hold back a block of data to be transferred as part
of final
  crypto: qce: skcipher: Return unsupported if key1 and key 2 are same
for AES XTS algorithm
  crypto: qce: skcipher: Return unsupported if any three keys are same
for DES3 algorithms
  crypto: qce: skcipher: Return error for zero length messages
  crypto: qce: skcipher: Return error for non-blocksize data(ECB/CBC
algorithms)
  crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)
  crypto: qce: skcipher: Improve the conditions for requesting AES
fallback cipher
  crypto: qce: common: Set data unit size to message length for AES XTS
transformation
  crypto: qce: Remover src_tbl from qce_cipher_reqctx
  crypto: qce: Remove totallen and offset in qce_start

 drivers/crypto/qce/cipher.h   |   1 -
 drivers/crypto/qce/common.c   |  25 +++---
 drivers/crypto/qce/common.h   |   3 +-
 drivers/crypto/qce/sha.c  | 143 +-
 drivers/crypto/qce/skcipher.c |  72 ++---
 5 files changed, 129 insertions(+), 115 deletions(-)

-- 
2.25.1



[PATCH v5 07/11] crypto: qce: skcipher: Set ivsize to 0 for ecb(aes)

2021-02-04 Thread Thara Gopinath
ECB transformations do not have an IV and hence set the ivsize to 0 for
ecb(aes).

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 28bea9584c33..10e85b1fc0fd 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -356,7 +356,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name   = "ecb(aes)",
.drv_name   = "ecb-aes-qce",
.blocksize  = AES_BLOCK_SIZE,
-   .ivsize = AES_BLOCK_SIZE,
+   .ivsize = 0,
.min_keysize= AES_MIN_KEY_SIZE,
.max_keysize= AES_MAX_KEY_SIZE,
},
-- 
2.25.1



[PATCH v5 05/11] crypto: qce: skcipher: Return error for zero length messages

2021-02-04 Thread Thara Gopinath
Crypto engine BAM dma does not support 0 length data. Return unsupported
if zero length messages are passed for transformation.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index de1f37ed4ee6..331b3c3a5b59 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -260,6 +261,10 @@ static int qce_skcipher_crypt(struct skcipher_request 
*req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
 
+   /* CE does not handle 0 length messages */
+   if (!req->cryptlen)
+   return -EOPNOTSUPP;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1



[PATCH v5 03/11] crypto: qce: skcipher: Return unsupported if key1 and key 2 are same for AES XTS algorithm

2021-02-04 Thread Thara Gopinath
Crypto engine does not support key1 = key2 for AES XTS algorithm; the
operation hangs the engine. Return -ENOKEY in case key1 and key2 are the
same.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 19 ++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..12955dcd53dd 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -167,16 +167,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher 
*ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+   unsigned int __keylen;
int ret;
 
if (!key || !keylen)
return -EINVAL;
 
-   switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+   /*
+* AES XTS key1 = key2 not supported by crypto engine.
+* Revisit to request a fallback cipher in this case.
+*/
+   if (IS_XTS(flags)) {
+   __keylen = keylen >> 1;
+   if (!memcmp(key, key + __keylen, __keylen))
+   return -ENOKEY;
+   } else {
+   __keylen = keylen;
+   }
+
+   switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+   case AES_KEYSIZE_192:
+   break;
+   default:
+   return -EINVAL;
}
 
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-- 
2.25.1



[PATCH v5 01/11] crypto: qce: sha: Restore/save ahash state with custom struct in export/import

2021-02-04 Thread Thara Gopinath
Export and import interfaces save and restore partial transformation
states. The partial states were being stored and restored in struct
sha1_state for sha1/hmac(sha1) transformations and in struct sha256_state
for sha256/hmac(sha256) transformations. This led to a bunch of corner
cases where improper state was being stored and restored. A few of the
corner cases that turned up during testing are:

- wrong byte_count restored if export/import is called twice without a h/w
  transaction in between
- wrong buflen restored if the pending buffer length is exactly the block
  size
- wrong state restored if the buffer length is 0

To fix these issues, save and restore the partial transformation state
using the newly introduced struct qce_sha_saved_state. This ensures that
all the pieces required to properly restart the transformation are
captured and restored.
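
As a rough sketch of the export side (inferred from the struct
qce_sha_saved_state definition in the hunk below; not necessarily the exact
driver code, and the function name is illustrative), every field needed to
restart the hash is copied out, with import doing the mirror-image copies:

/* Hedged sketch; assumes it lives in drivers/crypto/qce/sha.c where both
 * struct qce_sha_reqctx and struct qce_sha_saved_state are visible.
 */
static int qce_ahash_export_sketch(struct ahash_request *req, void *out)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_saved_state *s = out;

	memcpy(s->pending_buf, rctx->buf, rctx->buflen);
	memcpy(s->partial_digest, rctx->digest, sizeof(s->partial_digest));
	memcpy(s->byte_count, rctx->byte_count, sizeof(s->byte_count));
	s->pending_buflen = rctx->buflen;
	s->flags = rctx->flags;
	s->count = rctx->count;
	s->first_blk = rctx->first_blk;

	return 0;
}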

Signed-off-by: Thara Gopinath 
---

v4->v5:
- Fixed a build warning/error in the patch for a wrong assignment of a const
  pointer, as reported by the kernel test robot.
v1->v2:
- Introduced the custom struct qce_sha_saved_state to store and
  restore the partial sha transformation state. v1 was re-using
  qce_sha_reqctx to save and restore partial states, which
  could lead to potential memcpy issues around pointer copying.


 drivers/crypto/qce/sha.c | 122 +++
 1 file changed, 34 insertions(+), 88 deletions(-)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..7da562dca740 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
 #include "core.h"
 #include "sha.h"
 
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING64
-#define SHA_PADDING_MASK   (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+   u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+   u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+   __be32 byte_count[2];
+   unsigned int pending_buflen;
+   unsigned int flags;
+   u64 count;
+   bool first_blk;
+};
 
 static LIST_HEAD(ahash_algs);
 
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
 
 static int qce_ahash_export(struct ahash_request *req, void *out)
 {
-   struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-   unsigned long flags = rctx->flags;
-   unsigned int digestsize = crypto_ahash_digestsize(ahash);
-   unsigned int blocksize =
-   crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
-   if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
-   struct sha1_state *out_state = out;
-
-   out_state->count = rctx->count;
-   qce_cpu_to_be32p_array((__be32 *)out_state->state,
-  rctx->digest, digestsize);
-   memcpy(out_state->buffer, rctx->buf, blocksize);
-   } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
-   struct sha256_state *out_state = out;
-
-   out_state->count = rctx->count;
-   qce_cpu_to_be32p_array((__be32 *)out_state->state,
-  rctx->digest, digestsize);
-   memcpy(out_state->buf, rctx->buf, blocksize);
-   } else {
-   return -EINVAL;
-   }
-
-   return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
-const u32 *state, const u8 *buffer, bool hmac)
-{
-   struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-   struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-   unsigned int digestsize = crypto_ahash_digestsize(ahash);
-   unsigned int blocksize;
-   u64 count = in_count;
-
-   blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-   rctx->count = in_count;
-   memcpy(rctx->buf, buffer, blocksize);
-
-   if (in_count <= blocksize) {
-   rctx->first_blk = 1;
-   } else {
-   rctx->first_blk = 0;
-   /*
-* For HMAC, there is a hardware padding done when first block
-* is set. Therefore the byte_count must be incremened by 64
-* after the first block operation.
-*/
-   if (hmac)
-   count += SHA_PADDING;
-   }
+   struct qce_sha_saved_state *export_state = out;
 
-   rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
-   rctx->byte_count[1] = (__force __be32)(count >> 32);
-   qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
-  digestsize);
-   rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+   memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+   memcpy(export_st

[PATCH v5 02/11] crypto: qce: sha: Hold back a block of data to be transferred as part of final

2021-02-04 Thread Thara Gopinath
If the available data to transfer is exactly a multiple of the block size,
save the last block to be transferred in qce_ahash_final (with the last
block bit set) if this is indeed the end of the data stream. If not, this
saved block will be transferred as part of the next update. If this block
is not held back and this is indeed the end of the data stream, the digest
obtained will be wrong, since qce_ahash_final will see that rctx->buflen is
0 and return without doing anything, which in turn means that a digest will
not be copied to the destination result buffer. qce_ahash_final cannot be
made to alter this behavior and allowed to proceed when rctx->buflen is 0,
because the crypto engine BAM does not allow zero-length transfers.
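
A worked example of the condition (illustrative values and function name, not
driver code): with a 64-byte block size and exactly 128 bytes available, the
plain modulo leaves nothing buffered for final(), so one full block is held
back instead.

/* Illustrative arithmetic only. */
static unsigned int bytes_to_buffer(unsigned int total, unsigned int blocksize)
{
	unsigned int hash_later = total % blocksize;

	/* Exact multiple of the block size: hold back one full block so that
	 * qce_ahash_final() still has data to send with the last-block bit set.
	 */
	if (!hash_later)
		hash_later = blocksize;

	return hash_later;
}

/* e.g. bytes_to_buffer(128, 64) == 64, bytes_to_buffer(100, 64) == 36 */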

Reviewed-by: Bjorn Andersson 
Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/sha.c | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 7da562dca740..2813c9a27a6e 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -216,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
 
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+   /*
+* At this point, there is more than one block size of data.  If
+* the available data to transfer is exactly a multiple of block
+* size, save the last block to be transferred in qce_ahash_final
+* (with the last block bit set) if this is indeed the end of data
+* stream. If not this saved block will be transferred as part of
+* next update. If this block is not held back and if this is
+* indeed the end of data stream, the digest obtained will be wrong
+* since qce_ahash_final will see that rctx->buflen is 0 and return
+* doing nothing which in turn means that a digest will not be
+* copied to the destination result buffer.  qce_ahash_final cannot
+* be made to alter this behavior and allowed to proceed if
+* rctx->buflen is 0 because the crypto engine BAM does not allow
+* for zero length transfers.
+*/
+   if (!hash_later)
+   hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
-- 
2.25.1



[PATCH v4 05/11] crypto: qce: skcipher: Return error for zero length messages

2021-02-03 Thread Thara Gopinath
Crypto engine BAM DMA does not support zero-length data. Return -EOPNOTSUPP
if a zero-length message is passed for transformation.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/skcipher.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index de1f37ed4ee6..331b3c3a5b59 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -260,6 +261,10 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
 
+   /* CE does not handle 0 length messages */
+   if (!req->cryptlen)
+   return -EOPNOTSUPP;
+
/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
 * is not a multiple of it; pass such requests to the fallback
 */
-- 
2.25.1



[PATCH v4 10/11] crypto: qce: Remove src_tbl from qce_cipher_reqctx

2021-02-03 Thread Thara Gopinath
src_tbl is unused, so remove it from struct qce_cipher_reqctx.

Signed-off-by: Thara Gopinath 
---
 drivers/crypto/qce/cipher.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
-   struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req;   // keep at the end
-- 
2.25.1


