[PATCH] crypto: AF_ALG - limit mask and type

2017-12-11 Thread Stephan Müller
Hi Herbert,

You can see the reported problem by simply using

sa.salg_mask = 0xffffffff;

Note, I am not fully sure whether CRYPTO_AF_ALG_ALLOWED_MASK and
CRYPTO_AF_ALG_ALLOWED_TYPE have the correct values. But I think that all
user space should be able to reach is potentially the ASYNC flag and the
cipher type flags.
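
To make the resource exhaustion concrete, the following is a minimal
sketch of the pattern (plain user space C; the loop bound and the mask
values are chosen purely for illustration):

#include <linux/if_alg.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	unsigned int i;

	/* Each bind() with a previously unseen type/mask combination can
	 * make the crypto API instantiate and register the algorithm
	 * again, so looping over mask values piles up registrations. */
	for (i = 0; i < 4096; i++) {
		struct sockaddr_alg sa;
		int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

		memset(&sa, 0, sizeof(sa));
		sa.salg_family = AF_ALG;
		strcpy((char *)sa.salg_type, "skcipher");
		strcpy((char *)sa.salg_name, "cbc(aes)");
		sa.salg_mask = i << 8;	/* vary bits above the type field */

		bind(fd, (struct sockaddr *)&sa, sizeof(sa));
		close(fd);
	}
	return 0;
}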

---8<---

The user space interface allows specifying the type and the mask field
used to allocate the cipher. Only a subset of the type and mask flags
is relevant for user space to set, if any at all.

This fixes a bug where user space is able to cause one cipher to be
registered multiple times potentially exhausting kernel memory.

Reported-by: syzbot 
Cc: 
Signed-off-by: Stephan Mueller 
---
 crypto/af_alg.c | 7 +++
 crypto/algif_aead.c | 2 ++
 crypto/algif_hash.c | 2 ++
 crypto/algif_rng.c  | 2 ++
 crypto/algif_skcipher.c | 2 ++
 include/crypto/if_alg.h | 1 +
 include/linux/crypto.h  | 3 +++
 7 files changed, 19 insertions(+)

diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 1e5353f62067..16cfbde64048 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1172,6 +1172,13 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 }
 EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
 
+void af_alg_restrict_type_mask(u32 *type, u32 *mask)
+{
+   *type &= CRYPTO_AF_ALG_ALLOWED_TYPE;
+   *mask &= CRYPTO_AF_ALG_ALLOWED_MASK;
+}
+EXPORT_SYMBOL_GPL(af_alg_restrict_type_mask);
+
 static int __init af_alg_init(void)
 {
int err = proto_register(&alg_proto, 0);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 9d73be28cf01..5d21db83bdfd 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -463,6 +463,8 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
if (!tfm)
return ERR_PTR(-ENOMEM);
 
+   af_alg_restrict_type_mask(&type, &mask);
+
aead = crypto_alloc_aead(name, type, mask);
if (IS_ERR(aead)) {
kfree(tfm);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 76d2e716c792..f7660e80cd05 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -419,6 +419,8 @@ static void *hash_bind(const char *name, u32 type, u32 mask)
if (!tfm)
return ERR_PTR(-ENOMEM);
 
+   af_alg_restrict_type_mask(&type, &mask);
+
hash = crypto_alloc_ahash(name, type, mask);
if (IS_ERR(hash)) {
kfree(tfm);
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 150c2b6480ed..33a7064996f2 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -116,6 +116,8 @@ static struct proto_ops algif_rng_ops = {
 
 static void *rng_bind(const char *name, u32 type, u32 mask)
 {
+   af_alg_restrict_type_mask(&type, &mask);
+
return crypto_alloc_rng(name, type, mask);
 }
 
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 9954b078f0b9..0a4987aa9d5c 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -314,6 +314,8 @@ static void *skcipher_bind(const char *name, u32 type, u32 mask)
if (!tfm)
return ERR_PTR(-ENOMEM);
 
+   af_alg_restrict_type_mask(&type, &mask);
+
skcipher = crypto_alloc_skcipher(name, type, mask);
if (IS_ERR(skcipher)) {
kfree(tfm);
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 6abf0a3604dc..8ade69d46025 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -250,5 +250,6 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
struct af_alg_async_req *areq, size_t maxsize,
size_t *outlen);
+void af_alg_restrict_type_mask(u32 *type, u32 *mask);
 
 #endif /* _CRYPTO_IF_ALG_H */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 78508ca4b108..0d7694673fff 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -70,6 +70,9 @@
 #define CRYPTO_ALG_DYING   0x0040
 #define CRYPTO_ALG_ASYNC   0x0080
 
+#define CRYPTO_AF_ALG_ALLOWED_MASK 0x00ff
+#define CRYPTO_AF_ALG_ALLOWED_TYPE 0x00ff
+
 /*
  * Set this bit if and only if the algorithm requires another algorithm of
  * the same type to handle corner cases.
-- 
2.14.3




[PATCH] crypto: gf128mul - remove incorrect comment

2017-12-11 Thread Eric Biggers
From: Eric Biggers 

The comment in gf128mul_x8_ble() was copy-and-pasted from gf128mul.h and
makes no sense in the new context.  Remove it.

Cc: Harsh Jain 
Signed-off-by: Eric Biggers 
---
 crypto/gf128mul.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 24e601954c7a..a4b1c026aaee 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x)
 {
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
-
-   /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
u64 _tt = gf128mul_table_be[a >> 56];
 
r->a = cpu_to_le64((a << 8) | (b >> 56));
-- 
2.15.1.424.g9478a66081-goog



[PATCH] crypto: chacha20poly1305 - validate the digest size

2017-12-11 Thread Eric Biggers
From: Eric Biggers 

If the rfc7539 template was instantiated with a hash algorithm with
digest size larger than 16 bytes (POLY1305_DIGEST_SIZE), then the digest
overran the 'tag' buffer in 'struct chachapoly_req_ctx', corrupting the
subsequent memory, including 'cryptlen'.  This caused a crash during
crypto_skcipher_decrypt().

Fix it by, when instantiating the template, requiring that the
underlying hash algorithm has the digest size expected for Poly1305.

Reproducer:

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

int main()
{
int algfd, reqfd;
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "rfc7539(chacha20,sha256)",
};
unsigned char buf[32] = { 0 };

algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, sizeof(buf));
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 16);
read(reqfd, buf, 16);
}

Reported-by: syzbot 
Fixes: 71ebc4d1b27d ("crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539")
Cc:  # v4.2+
Signed-off-by: Eric Biggers 
---
 crypto/chacha20poly1305.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index db1bc3147bc4..600afa99941f 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
algt->mask));
if (IS_ERR(poly))
return PTR_ERR(poly);
+   poly_hash = __crypto_hash_alg_common(poly);
+
+   err = -EINVAL;
+   if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+   goto out_put_poly;
 
err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
ctx = aead_instance_ctx(inst);
ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-   poly_hash = __crypto_hash_alg_common(poly);
err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
  aead_crypto_instance(inst));
if (err)
-- 
2.15.1.424.g9478a66081-goog



Re: general protection fault in blkcipher_walk_done

2017-12-11 Thread Eric Biggers
On Mon, Nov 27, 2017 at 10:56:47AM -0800, syzbot wrote:
> syzbot will keep track of this bug report.
> Once a fix for this bug is committed, please reply to this email with:
> #syz fix: exact-commit-title
> If you want to test a patch for this bug, please reply with:
> #syz test: git://repo/address.git branch
> and provide the patch inline or as an attachment.
> To mark this as a duplicate of another syzbot report, please reply with:
> #syz dup: exact-subject-of-another-report
> If it's a one-off invalid bug report, please reply with:
> #syz invalid
> Note: if the crash happens again, it will cause creation of a new
> bug report.
> Note: all commands must start from beginning of the line in the email body.

#syz fix: crypto: af_alg - wait for data at beginning of recvmsg


Re: general protection fault in crypto_chacha20_crypt

2017-12-11 Thread Eric Biggers
On Wed, Nov 29, 2017 at 01:24:38AM -0800, Eric Biggers wrote:
> 
> The bug is that the skcipher_walk API doesn't set the IV for zero-length 
> inputs,
> while some algorithms (e.g. ChaCha20) access the IV even if the input is
> zero-length.  So it was dereferencing a pointer which came from uninitialized
> stack memory.  I've sent out a fix:
> 
> "crypto: skcipher - set walk.iv for zero-length inputs"
> 

#syz fix: crypto: skcipher - set walk.iv for zero-length inputs
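
To illustrate the class of bug, below is a toy user space model of the
hazard (purely illustrative, not kernel code): the walk structure lives
on the stack and its IV pointer is only assigned when there are bytes to
process, so an algorithm that touches the IV unconditionally reads an
uninitialized pointer for empty input:

#include <stdio.h>

struct toy_walk {
	unsigned int nbytes;
	unsigned char *iv;
};

/* models the buggy behaviour: iv is left untouched for empty input */
static void toy_walk_init(struct toy_walk *w, unsigned char *iv,
			  unsigned int len)
{
	w->nbytes = len;
	if (!len)
		return;
	w->iv = iv;
}

int main(void)
{
	unsigned char iv[16] = { 0 };
	struct toy_walk w;	/* stack garbage until fully initialized */

	toy_walk_init(&w, iv, 0);
	/* a cipher dereferencing w.iv here would crash or read garbage */
	printf("iv pointer: %p\n", (void *)w.iv);
	return 0;
}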


Re: KASAN: use-after-free Write in aead_recvmsg

2017-12-11 Thread Eric Biggers
On Mon, Dec 04, 2017 at 07:57:01AM -0800, syzbot wrote:
> syzbot will keep track of this bug report.
> Once a fix for this bug is committed, please reply to this email with:
> #syz fix: exact-commit-title
> If you want to test a patch for this bug, please reply with:
> #syz test: git://repo/address.git branch
> and provide the patch inline or as an attachment.
> To mark this as a duplicate of another syzbot report, please reply with:
> #syz dup: exact-subject-of-another-report
> If it's a one-off invalid bug report, please reply with:
> #syz invalid
> Note: if the crash happens again, it will cause creation of a new
> bug report.
> Note: all commands must start from beginning of the line in the email body.
> 

#syz fix: crypto: af_alg - fix race accessing cipher request


Re: general protection fault in strcmp

2017-12-11 Thread Eric Biggers
On Thu, Nov 30, 2017 at 12:44:01PM -0800, syzbot wrote:
> syzbot will keep track of this bug report.
> Once a fix for this bug is committed, please reply to this email with:
> #syz fix: exact-commit-title
> To mark this as a duplicate of another syzbot report, please reply with:
> #syz dup: exact-subject-of-another-report
> If it's a one-off invalid bug report, please reply with:
> #syz invalid
> Note: if the crash happens again, it will cause creation of a new
> bug report.
> Note: all commands must start from beginning of the line in the email body.
> 

#syz fix: KEYS: reject NULL restriction string when type is specified


Re: [PATCH v3 3/3] ARM: dts: exynos: Add nodes for True Random Number Generator

2017-12-11 Thread Krzysztof Kozlowski
On Mon, Dec 04, 2017 at 01:53:51PM +0100, Łukasz Stelmach wrote:
> Add nodes for the True Random Number Generator found in Samsung Exynos
> 5250+ SoCs.
> 
> Signed-off-by: Łukasz Stelmach 
> ---
>  arch/arm/boot/dts/exynos5.dtsi| 5 +
>  arch/arm/boot/dts/exynos5250.dtsi | 5 +
>  arch/arm/boot/dts/exynos5410.dtsi | 5 +
>  arch/arm/boot/dts/exynos5420.dtsi | 5 +
>  4 files changed, 20 insertions(+)
>

Unfortunately it is the same story as with your PRNG patch - it does not
apply on top of my next/dt (after taking the PRNG). It also did not apply
on v4.15-rc1 + PRNG.

Could you rebase on my next/dt?

Best regards,
Krzysztof



Re: [PATCH] fscrypt: add support for ChaCha20 contents encryption

2017-12-11 Thread David Gstir

> On 08.12.2017, at 03:51, Jason A. Donenfeld  wrote:
> 
> Hi Eric,
> 
> Nice to see more use of ChaCha20. However...
> 
> Can we skip over the "sort of worse than XTS, but not having _real_
> authentication sucks anyway in either case, so whatever" and move
> directly to, "linux finally supports authenticated encryption for disk
> encryption!"? This would be a big deal and would actually be a
> noticeable security improvement, instead of a potentially dubious step
> sidewaysbackish.

Out of curiosity, does anybody know of any specific attacks that authenticated
encryption for disk encryption would prevent, as opposed to just using
encryption with AES-XTS?

To my knowledge the XTS mode is frowned upon [1], but I don't know of any
serious flaws that would e.g. allow an attacker to modify file contents
without a *serious* amount of effort. CBC is another story though [2].

Don't get me wrong, I'd like to have authenticated encryption too.
In fact we are currently working on a concept for adding authentication to
UBIFS (I'll share more details as soon as it's in a presentable state).
However, the reason for this is mainly that UBIFS does *not* operate on
the block layer, so dm-integrity/dm-verity is not an option and fscrypt
only protects the confidentiality of file contents and filenames.
This means that the filesystem index is unprotected, which makes it rather
easy to move files around - e.g. replace /bin/bash with something completely
different without knowing the fscrypt master key or any derived key.

For the general use case though (e.g. securing *really important* files on
my desktop), I'd use authenticated encryption at a higher layer to get more
flexibility, e.g. to easily use explicit IVs over implicit ones derived from
the block/sector number. But maybe there are some use cases I didn't think
of just now... :)

David

[1] https://sockpuppet.org/blog/2014/04/30/you-dont-want-xts/
[2] http://www.jakoblell.com/blog/2013/12/22/practical-malleability-attack-against-cbc-encrypted-luks-partitions/



Re: [PATCH v2 4/4] crypto: exynos - Introduce mutex to prevent concurrent access to hardware

2017-12-11 Thread Krzysztof Kozlowski
On Mon, Dec 11, 2017 at 3:06 PM, Łukasz Stelmach  wrote:
> Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 
> 
>
> Hardware operations like reading random numbers and setting a seed need
> to be conducted in a single thread. Therefore a mutex is required to
> prevent multiple threads (processes) from accessing the hardware at the
> same time.
>
> The sequence of mutex_lock() and mutex_unlock() in the exynos_rng_reseed()
> function enables switching between different threads waiting for the
> driver to generate random numbers for them.
>
> Signed-off-by: Łukasz Stelmach 
> ---
>  drivers/crypto/exynos-rng.c | 21 +
>  1 file changed, 21 insertions(+)
>
> diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
> index c72a838f1932..6209035ca659 100644
> --- a/drivers/crypto/exynos-rng.c
> +++ b/drivers/crypto/exynos-rng.c
> @@ -22,6 +22,7 @@
>  #include 
>  #include 
>  #include 
> +#include <linux/mutex.h>
>  #include 
>  #include 
>
> @@ -79,6 +80,7 @@ struct exynos_rng_dev {
> enum exynos_prng_type   type;
> void __iomem*mem;
> struct clk  *clk;
> +   struct mutex    lock;
> /* Generated numbers stored for seeding during resume */
> u8  seed_save[EXYNOS_RNG_SEED_SIZE];
> unsigned intseed_save_len;
> @@ -192,6 +194,10 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
> return;
>
> exynos_rng_set_seed(rng, seed, read);
> +
> +   /* Let others do some of their job. */
> +   mutex_unlock(&rng->lock);
> +   mutex_lock(&rng->lock);
>  }
>
>  static int exynos_rng_generate(struct crypto_rng *tfm,
> @@ -207,6 +213,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
> if (ret)
> return ret;
>
> +   mutex_lock(&rng->lock);
> do {
> ret = exynos_rng_get_random(rng, dst, dlen, &read);
> if (ret)
> @@ -217,6 +224,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
>
> exynos_rng_reseed(rng);
> } while (dlen > 0);
> +   mutex_unlock(&rng->lock);
>
> clk_disable_unprepare(rng->clk);
>
> @@ -234,7 +242,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
> if (ret)
> return ret;
>
> +   mutex_lock(&rng->lock);
> ret = exynos_rng_set_seed(ctx->rng, seed, slen);
> +   mutex_unlock(&rng->lock);

I think the number of mutex lock/unlock statements can be reduced
(including the mutex unlock+lock pattern) by moving the mutex into
exynos_rng_set_seed() and exynos_rng_get_random(), because those are
what you actually want to protect. This would remove the new code from
the suspend and resume paths and give you the fairness.

On the other hand the mutex would be unlocked+locked many times for
large generate() calls...
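
A minimal sketch of that alternative (the _locked wrapper below is a
hypothetical name, not something from the driver):

static int exynos_rng_get_random_locked(struct exynos_rng_dev *rng,
					u8 *dst, unsigned int dlen,
					unsigned int *read)
{
	int ret;

	/* serialize all hardware access inside the accessor itself */
	mutex_lock(&rng->lock);
	ret = exynos_rng_get_random(rng, dst, dlen, read);
	mutex_unlock(&rng->lock);

	return ret;
}

Suspend/resume and the seed path would then need no explicit locking,
at the cost of one lock/unlock round trip per chunk in generate().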

Best regards,
Krzysztof

> clk_disable_unprepare(rng->clk);
>
> @@ -284,6 +294,8 @@ static int exynos_rng_probe(struct platform_device *pdev)
> return -ENOTSUPP;
> }
>
> +   mutex_init(&rng->lock);
> +
> rng->dev = &pdev->dev;
> rng->clk = devm_clk_get(&pdev->dev, "secss");
> if (IS_ERR(rng->clk)) {
> @@ -334,9 +346,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
> if (ret)
> return ret;
>
> +   mutex_lock(&rng->lock);
> +
> /* Get new random numbers and store them for seeding on resume. */
> exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
>   &(rng->seed_save_len));
> +
> +   mutex_unlock(&rng->lock);
> +
> dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
> rng->seed_save_len);
>
> @@ -359,8 +376,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
> if (ret)
> return ret;
>
> +   mutex_lock(&rng->lock);
> +
> ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
>
> +   mutex_unlock(&rng->lock);
> +
> clk_disable_unprepare(rng->clk);
>
> return ret;
> --
> 2.11.0
>


Re: [PATCH v2 3/4] crypto: exynos - Reseed PRNG after generating 2^16 random bytes

2017-12-11 Thread Krzysztof Kozlowski
On Mon, Dec 11, 2017 at 3:06 PM, Łukasz Stelmach  wrote:
> Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 
> 

Same as in 1/4 and 2/4.

>
> Reseed PRNG after reading 65 kB of randomness. Although this may reduce
> performance, in most cases the loss is not noticeable.

You missed the comment (both from me and from Stephan) about mentioning
the timing change.

Best regards,
Krzysztof


Re: [PATCH v2 2/4] crypto: exynos - Improve performance of PRNG

2017-12-11 Thread Krzysztof Kozlowski
On Mon, Dec 11, 2017 at 3:06 PM, Łukasz Stelmach  wrote:
> Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 
> 

This should not appear here.

>
> Use memcpy_fromio() instead of custom exynos_rng_copy_random() function
> to retrieve generated numbers from the registers of PRNG.
>
> Rearrange the loop around cpu_relax(). In a loop with while() at the
> beginning and the cpu_relax() removed the retry variable is decremented
> twice (down to 98).

I had trouble understanding this sentence... and then I figured
out that you are referring to some case without cpu_relax(). I do not
see how it is relevant to this case. Compare the new code with the old,
not with some imaginary case without barriers (thus maybe reordered?).

Your solution is strictly performance oriented so it would be nice to
see here the exact difference in numbers justifying the change. But
only the change for while() -> do-while(), not mixed with
memcpy_fromio.

> This means, the status register is read three
> times before the hardware is ready (which is very quick). Apparently,
> cpu_relax() requires noticeable amount of time to execute, so it seems
> better to call it for the first time before checking the status of
> PRNG. The do {} while () loop decrements the retry variable only once,
> which means, the time required for cpu_relax() is long enough for
> the PRNG to provide results.

So basically you want to say that you removed one call to exynos_rng_read()?

> On the other hand, performance in this
> arrangement isn't much worse than in a loop without cpu_relax().

I think it is not relevant, as cpu_relax() removal is not part of either
the current or the new code.
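
For reference, these are the two loop shapes being compared, as they
appear in the old and the new code respectively:

	/* old: poll the status first, relax while waiting */
	while (!(exynos_rng_readl(rng, EXYNOS_RNG_STATUS) &
		 EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
		cpu_relax();

	/* new: relax once before the first status read */
	do {
		cpu_relax();
	} while (!(exynos_rng_readl(rng, EXYNOS_RNG_STATUS) &
		   EXYNOS_RNG_STATUS_RNG_DONE) && --retry);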

Best regards,
Krzysztof

>
> Signed-off-by: Łukasz Stelmach 
> ---
>  drivers/crypto/exynos-rng.c | 36 +---
>  1 file changed, 5 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
> index 2f554b82f49f..7d8f658480d3 100644
> --- a/drivers/crypto/exynos-rng.c
> +++ b/drivers/crypto/exynos-rng.c
> @@ -131,34 +131,6 @@ static int exynos_rng_set_seed(struct exynos_rng_dev 
> *rng,
>  }
>
>  /*
> - * Read from output registers and put the data under 'dst' array,
> - * up to dlen bytes.
> - *
> - * Returns number of bytes actually stored in 'dst' (dlen
> - * or EXYNOS_RNG_SEED_SIZE).
> - */
> -static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
> -  u8 *dst, unsigned int dlen)
> -{
> -   unsigned int cnt = 0;
> -   int i, j;
> -   u32 val;
> -
> -   for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
> -   val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
> -
> -   for (i = 0; i < 4; i++) {
> -   dst[cnt] = val & 0xff;
> -   val >>= 8;
> -   if (++cnt >= dlen)
> -   return cnt;
> -   }
> -   }
> -
> -   return cnt;
> -}
> -
> -/*
>   * Start the engine and poll for finish.  Then read from output registers
>   * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
>   * random data (EXYNOS_RNG_SEED_SIZE).
> @@ -180,9 +152,10 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
>   EXYNOS_RNG_SEED_CONF);
> }
>
> -   while (!(exynos_rng_readl(rng,
> -   EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
> +   do {
> cpu_relax();
> +   } while (!(exynos_rng_readl(rng, EXYNOS_RNG_STATUS) &
> +  EXYNOS_RNG_STATUS_RNG_DONE) && --retry);
>
> if (!retry)
> return -ETIMEDOUT;
> @@ -190,7 +163,8 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
> /* Clear status bit */
> exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
>   EXYNOS_RNG_STATUS);
> -   *read = exynos_rng_copy_random(rng, dst, dlen);
> +   *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
> +   memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
>
> return 0;
>  }
> --
> 2.11.0
>


Re: [PATCH v2 1/4] crypto: exynos - Support Exynos5250+ SoCs

2017-12-11 Thread Krzysztof Kozlowski
On Mon, Dec 11, 2017 at 3:06 PM, Łukasz Stelmach  wrote:
> Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 
> 
>
> Add support for PRNG in Exynos5250+ SoCs.
>
> Signed-off-by: Łukasz Stelmach 
> ---
>  .../bindings/crypto/samsung,exynos-rng4.txt|  4 ++-
>  drivers/crypto/exynos-rng.c| 32 
> --
>  2 files changed, 33 insertions(+), 3 deletions(-)
>
> diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt 
> b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
> index 4ca8dd4d7e66..a13fbdb4bd88 100644
> --- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
> +++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
> @@ -2,7 +2,9 @@ Exynos Pseudo Random Number Generator
>
>  Required properties:
>
> -- compatible  : Should be "samsung,exynos4-rng".
> +- compatible  : One of:
> +- "samsung,exynos4-rng" for Exynos4210 and Exynos4412
> +- "samsung,exynos5250-prng" for Exynos5250+
>  - reg : Specifies base physical address and size of the registers 
> map.
>  - clocks  : Phandle to clock-controller plus clock-specifier pair.
>  - clock-names : "secss" as a clock name.
> diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
> index ed6ba796ad71..2f554b82f49f 100644
> --- a/drivers/crypto/exynos-rng.c
> +++ b/drivers/crypto/exynos-rng.c
> @@ -22,12 +22,17 @@
>  #include 
>  #include 
>  #include 
> +#include <linux/of_device.h>
>  #include 
>
>  #include 
>
>  #define EXYNOS_RNG_CONTROL 0x0
>  #define EXYNOS_RNG_STATUS  0x10
> +
> +#define EXYNOS_RNG_SEED_CONF   0x14
> +#define EXYNOS_RNG_GEN_PRNG    BIT(1)
> +
>  #define EXYNOS_RNG_SEED_BASE   0x140
>  #define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
>  #define EXYNOS_RNG_OUT_BASE0x160
> @@ -43,6 +48,12 @@
>  #define EXYNOS_RNG_SEED_REGS   5
>  #define EXYNOS_RNG_SEED_SIZE   (EXYNOS_RNG_SEED_REGS * 4)
>
> +enum exynos_prng_type {
> +   EXYNOS_PRNG_UNKNOWN = 0,
> +   EXYNOS_PRNG_EXYNOS4,
> +   EXYNOS_PRNG_EXYNOS5,
> +};
> +
>  /*
>   * Driver re-seeds itself with generated random numbers to increase
>   * the randomness.
> @@ -63,6 +74,7 @@ struct exynos_rng_ctx {
>  /* Device associated memory */
>  struct exynos_rng_dev {
> struct device   *dev;
> +   enum exynos_prng_type   type;
> void __iomem*mem;
> struct clk  *clk;
> /* Generated numbers stored for seeding during resume */
> @@ -160,8 +172,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
>  {
> int retry = EXYNOS_RNG_WAIT_RETRIES;
>
> -   exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
> - EXYNOS_RNG_CONTROL);
> +   if (rng->type == EXYNOS_PRNG_EXYNOS4) {
> +   exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
> + EXYNOS_RNG_CONTROL);
> +   } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
> +   exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
> + EXYNOS_RNG_SEED_CONF);
> +   }
>
> while (!(exynos_rng_readl(rng,
> EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
> @@ -279,6 +296,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
> if (!rng)
> return -ENOMEM;
>
> +   rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
> +   if (rng->type != EXYNOS_PRNG_EXYNOS4 &&
> +   rng->type != EXYNOS_PRNG_EXYNOS5) {
> dev_err(&pdev->dev, "Unsupported PRNG type: %d", rng->type);
> +   return -ENOTSUPP;
> +   }
> +

This cannot happen. The device will be matched only on a matching
compatible string, thus rng->type will always be correct.

Best regards,
Krzysztof


Re: [PATCH] crypto: stm32: fix modular build

2017-12-11 Thread Fabien DESSENNE
Hi Arnd,

This issue was reported a few days ago, and Herbert applied the patch two
minutes before you sent the mail (see
[https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg29762.html]).

Anyway, thank you for reporting this.

Fabien


On 11/12/17 12:47, Arnd Bergmann wrote:
> Building the stm32 crypto support as a loadable module causes a build
> failure from a simple typo:
>
> drivers/crypto/stm32/stm32-cryp.c:1035:25: error: 'sti_dt_ids' undeclared here (not in a function); did you mean 'stm32_dt_ids'?
>
> This renames the reference to point to the correct symbol.
>
> Fixes: 9e054ec21ef8 ("crypto: stm32 - Support for STM32 CRYP crypto module")
> Signed-off-by: Arnd Bergmann 
> ---
>   drivers/crypto/stm32/stm32-cryp.c | 2 +-
>   1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
> index 459755940632..cf1dddbeaa2c 100644
> --- a/drivers/crypto/stm32/stm32-cryp.c
> +++ b/drivers/crypto/stm32/stm32-cryp.c
> @@ -1032,7 +1032,7 @@ static const struct of_device_id stm32_dt_ids[] = {
>   { .compatible = "st,stm32f756-cryp", },
>   {},
>   };
> -MODULE_DEVICE_TABLE(of, sti_dt_ids);
> +MODULE_DEVICE_TABLE(of, stm32_dt_ids);
>   
>   static int stm32_cryp_probe(struct platform_device *pdev)
>   {


[PATCH v2 4/4] crypto: exynos - Introduce mutex to prevent concurrent access to hardware

2017-12-11 Thread Łukasz Stelmach
Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 


Hardware operations like reading random numbers and setting a seed need
to be conducted in a single thread. Therefore a mutex is required to
prevent multiple threads (processes) from accessing the hardware at the
same time.

The sequence of mutex_lock() and mutex_unlock() in the exynos_rng_reseed()
function enables switching between different threads waiting for the
driver to generate random numbers for them.

Signed-off-by: Łukasz Stelmach 
---
 drivers/crypto/exynos-rng.c | 21 +
 1 file changed, 21 insertions(+)

diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index c72a838f1932..6209035ca659 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include <linux/mutex.h>
 #include 
 #include 
 
@@ -79,6 +80,7 @@ struct exynos_rng_dev {
enum exynos_prng_type   type;
void __iomem*mem;
struct clk  *clk;
+   struct mutex    lock;
/* Generated numbers stored for seeding during resume */
u8  seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned intseed_save_len;
@@ -192,6 +194,10 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
return;
 
exynos_rng_set_seed(rng, seed, read);
+
+   /* Let others do some of their job. */
+   mutex_unlock(&rng->lock);
+   mutex_lock(&rng->lock);
 }
 
 static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -207,6 +213,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
 
+   mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -217,6 +224,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
 
exynos_rng_reseed(rng);
} while (dlen > 0);
+   mutex_unlock(&rng->lock);
 
clk_disable_unprepare(rng->clk);
 
@@ -234,7 +242,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
 
+   mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+   mutex_unlock(&rng->lock);
 
clk_disable_unprepare(rng->clk);
 
@@ -284,6 +294,8 @@ static int exynos_rng_probe(struct platform_device *pdev)
return -ENOTSUPP;
}
 
+   mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -334,9 +346,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
 
+   mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
  &(rng->seed_save_len));
+
+   mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
 
@@ -359,8 +376,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
 
+   mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
 
+   mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
 
return ret;
-- 
2.11.0



[PATCH v2 3/4] crypto: exynos - Reseed PRNG after generating 2^16 random bytes

2017-12-11 Thread Łukasz Stelmach
Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 


Reseed PRNG after reading 65 kB of randomness. Although this may reduce
performance, in most cases the loss is not noticeable.

Reseeding a PRNG does not increase entropy, but it helps prevent
backtracking of the device's internal state from its output sequence,
and hence prevents a potential attacker from predicting the numbers to
be generated.

Signed-off-by: Łukasz Stelmach 
Reviewed-by: Stephan Mueller 
---
 drivers/crypto/exynos-rng.c | 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 7d8f658480d3..c72a838f1932 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -55,12 +55,14 @@ enum exynos_prng_type {
 };
 
 /*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
  *
  * Time for next re-seed in ms.
  */
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
+#define EXYNOS_RNG_RESEED_BYTES    65536
+
 /*
  * In polling mode, do not wait infinitely for the engine to finish the work.
  */
@@ -82,6 +84,8 @@ struct exynos_rng_dev {
unsigned intseed_save_len;
/* Time of last seeding in jiffies */
unsigned long   last_seeding;
+   /* Bytes generated since last seeding */
+   unsigned long   bytes_seeding;
 };
 
 static struct exynos_rng_dev *exynos_rng_dev;
@@ -126,6 +130,7 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
 
rng->last_seeding = jiffies;
+   rng->bytes_seeding = 0;
 
return 0;
 }
@@ -165,6 +170,7 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
  EXYNOS_RNG_STATUS);
*read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+   rng->bytes_seeding += *read;
 
return 0;
 }
@@ -178,7 +184,8 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
 
-   if (time_before(now, next_seeding))
+   if (time_before(now, next_seeding) &&
+   rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
 
if (exynos_rng_get_random(rng, seed, sizeof(seed), ))
-- 
2.11.0



[PATCH v2 0/4] Assorted changes for Exynos PRNG driver

2017-12-11 Thread Łukasz Stelmach
Hello,

This is a series of patches for the exynos-rng driver which I decided
to create after adding support for Exynos5250+ chips. They do not
strictly depend on each other, but I think it is better to send them
as a single patch-set.

The driver requires appropriate DT configuration introduced in

https://patchwork.kernel.org/patch/10104445/

Patch #1 Add support for PRNG in Exynos5250+ SoCs

Patch #2 Improve output performance by using memcpy() rather than a
custom function to retrieve random bytes from registers. Rearrange
the loop for polling the hardware.

Patch #3 Reseed the PRNG after reading 2^16 bytes. A similar approach
is implemented in the DRBG. (Thanks Stephan Mueller)

Patch #4 Introduce locking to prevent simultaneous access to the
hardware from more than one thread/process.

Changes since v1:

- Added Patch #4.
- Extended commit message for patch #3.
- Changed exynos_prng_type enum and a define according to Krzysztof Kozłowski's
  recommendations.
- Brought back cpu_relax() in a rearranged loop in
  exynos_rng_get_random().
- Moved an assignment of the read value away from an error path.
- Removed dev_info() reporting hardware presence from
  exynos_rng_probe().

Łukasz Stelmach (4):
  crypto: exynos - Support Exynos5250+ SoCs
  crypto: exynos - Improve performance of PRNG
  crypto: exynos - Reseed PRNG after generating 2^16 random bytes
  crypto: exynos - Introduce mutex to prevent concurrent access to
hardware

 .../bindings/crypto/samsung,exynos-rng4.txt|   4 +-
 drivers/crypto/exynos-rng.c| 104 +
 2 files changed, 70 insertions(+), 38 deletions(-)

-- 
2.11.0



[PATCH v2 2/4] crypto: exynos - Improve performance of PRNG

2017-12-11 Thread Łukasz Stelmach
Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 


Use memcpy_fromio() instead of custom exynos_rng_copy_random() function
to retrieve generated numbers from the registers of PRNG.

Rearrange the loop around cpu_relax(). In a loop with while() at the
beginning and the cpu_relax() removed the retry variable is decremented
twice (down to 98). This means, the status register is read three
times before the hardware is ready (which is very quick). Apparently,
cpu_relax() requires noticeable amount of time to execute, so it seems
better to call it for the first time before checking the status of
PRNG. The do {} while () loop decrements the retry variable only once,
which means, the time required for cpu_relax() is long enough for
the PRNG to provide results. On the other hand, performance in this
arrangement isn't much worse than in a loop without cpu_relax().

Signed-off-by: Łukasz Stelmach 
---
 drivers/crypto/exynos-rng.c | 36 +---
 1 file changed, 5 insertions(+), 31 deletions(-)

diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 2f554b82f49f..7d8f658480d3 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -131,34 +131,6 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
 }
 
 /*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
-  u8 *dst, unsigned int dlen)
-{
-   unsigned int cnt = 0;
-   int i, j;
-   u32 val;
-
-   for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
-   val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
-   for (i = 0; i < 4; i++) {
-   dst[cnt] = val & 0xff;
-   val >>= 8;
-   if (++cnt >= dlen)
-   return cnt;
-   }
-   }
-
-   return cnt;
-}
-
-/*
  * Start the engine and poll for finish.  Then read from output registers
  * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
  * random data (EXYNOS_RNG_SEED_SIZE).
@@ -180,9 +152,10 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
  EXYNOS_RNG_SEED_CONF);
}
 
-   while (!(exynos_rng_readl(rng,
-   EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
+   do {
cpu_relax();
+   } while (!(exynos_rng_readl(rng, EXYNOS_RNG_STATUS) &
+  EXYNOS_RNG_STATUS_RNG_DONE) && --retry);
 
if (!retry)
return -ETIMEDOUT;
@@ -190,7 +163,8 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
  EXYNOS_RNG_STATUS);
-   *read = exynos_rng_copy_random(rng, dst, dlen);
+   *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+   memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
 
return 0;
 }
-- 
2.11.0



[PATCH v2 1/4] crypto: exynos - Support Exynos5250+ SoCs

2017-12-11 Thread Łukasz Stelmach
Cc: Marek Szyprowski , Bartlomiej Zolnierkiewicz 


Add support for PRNG in Exynos5250+ SoCs.

Signed-off-by: Łukasz Stelmach 
---
 .../bindings/crypto/samsung,exynos-rng4.txt|  4 ++-
 drivers/crypto/exynos-rng.c| 32 --
 2 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt 
b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
index 4ca8dd4d7e66..a13fbdb4bd88 100644
--- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
@@ -2,7 +2,9 @@ Exynos Pseudo Random Number Generator
 
 Required properties:
 
-- compatible  : Should be "samsung,exynos4-rng".
+- compatible  : One of:
+- "samsung,exynos4-rng" for Exynos4210 and Exynos4412
+- "samsung,exynos5250-prng" for Exynos5250+
 - reg : Specifies base physical address and size of the registers map.
 - clocks  : Phandle to clock-controller plus clock-specifier pair.
 - clock-names : "secss" as a clock name.
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index ed6ba796ad71..2f554b82f49f 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -22,12 +22,17 @@
 #include 
 #include 
 #include 
+#include <linux/of_device.h>
 #include 
 
 #include 
 
 #define EXYNOS_RNG_CONTROL 0x0
 #define EXYNOS_RNG_STATUS  0x10
+
+#define EXYNOS_RNG_SEED_CONF   0x14
+#define EXYNOS_RNG_GEN_PRNG    BIT(1)
+
 #define EXYNOS_RNG_SEED_BASE   0x140
 #define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
 #define EXYNOS_RNG_OUT_BASE0x160
@@ -43,6 +48,12 @@
 #define EXYNOS_RNG_SEED_REGS   5
 #define EXYNOS_RNG_SEED_SIZE   (EXYNOS_RNG_SEED_REGS * 4)
 
+enum exynos_prng_type {
+   EXYNOS_PRNG_UNKNOWN = 0,
+   EXYNOS_PRNG_EXYNOS4,
+   EXYNOS_PRNG_EXYNOS5,
+};
+
 /*
  * Driver re-seeds itself with generated random numbers to increase
  * the randomness.
@@ -63,6 +74,7 @@ struct exynos_rng_ctx {
 /* Device associated memory */
 struct exynos_rng_dev {
struct device   *dev;
+   enum exynos_prng_type   type;
void __iomem*mem;
struct clk  *clk;
/* Generated numbers stored for seeding during resume */
@@ -160,8 +172,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
 {
int retry = EXYNOS_RNG_WAIT_RETRIES;
 
-   exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+   if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+   exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+   } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+   exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+   }
 
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -279,6 +296,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
 
+   rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+   if (rng->type != EXYNOS_PRNG_EXYNOS4 &&
+   rng->type != EXYNOS_PRNG_EXYNOS5) {
dev_err(&pdev->dev, "Unsupported PRNG type: %d", rng->type);
+   return -ENOTSUPP;
+   }
+
rng->dev = >dev;
rng->clk = devm_clk_get(>dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -367,6 +391,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
 static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+   .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+   }, {
+   .compatible = "samsung,exynos5250-prng",
+   .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
 };
-- 
2.11.0



[PATCH] crypto: qat - reduce stack size with KASAN

2017-12-11 Thread Arnd Bergmann
Passing the register value by reference here leads to a large amount of stack
being used when CONFIG_KASAN is enabled:

drivers/crypto/qat/qat_common/qat_hal.c: In function 'qat_hal_exec_micro_inst.constprop':
drivers/crypto/qat/qat_common/qat_hal.c:963:1: error: the frame size of 1792 bytes is larger than 1536 bytes [-Werror=frame-larger-than=]

Changing the register-read function to return the value instead reduces the
stack size to around 800 bytes, most of which is for the 'savuwords' array.
The function now no longer returns an error code, but nothing ever evaluated
that anyway.

Signed-off-by: Arnd Bergmann 
---
 drivers/crypto/qat/qat_common/qat_hal.c | 133 
 1 file changed, 67 insertions(+), 66 deletions(-)

diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle 
*handle,
 
 #define CSR_RETRY_TIMES 500
 static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
-unsigned char ae, unsigned int csr,
-unsigned int *value)
+unsigned char ae, unsigned int csr)
 {
unsigned int iterations = CSR_RETRY_TIMES;
+   int value;
 
do {
-   *value = GET_AE_CSR(handle, ae, csr);
+   value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
-   return 0;
+   return value;
} while (iterations--);
 
pr_err("QAT: Read CSR timeout\n");
-   return -EFAULT;
+   return 0;
 }
 
 static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
 {
unsigned int cur_ctx;
 
-   qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+   cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-   qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+   *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
 }
 
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
 
-   qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+   base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0x;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
-   qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+   csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
 
-   qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+   cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0x;
elapsed_cycles = cur_cnt - base_cnt;
 
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
 
/* Sets the accelaration engine context mode to either four or eight */
-   qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+   csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
 {
unsigned int csr, new_csr;
 
-   qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+   csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
 
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
 {
unsigned int csr, new_csr;
 
-   qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+   csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
 {
unsigned int ctx, cur_ctx;
 
-   qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+   cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
 
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
 }
 
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
   

Re: [PATCH] crypto: AF_ALG - fix race accessing cipher request

2017-12-11 Thread Herbert Xu
On Fri, Dec 08, 2017 at 11:50:37AM +0100, Stephan Müller wrote:
> Hi Herbert,
> 
> This patch would go on top of 7d2c3f54e6f646887d019faa45f35d6fe9fe82ce
> "crypto: af_alg - remove locking in async callback" found in Linus' tree
> which is not yet in the cryptodev-2.6 tree.
> 
> In addition, this patch is already on top of the other patches discussed
> on this list fixing similar issues. I.e. depending in which order you apply
> the patches, there may be a hunk. In case you want me to rebase the patch,
> please let me know.
> 
> ---8<---
> When invoking an asynchronous cipher operation, the invocation of the
> callback may be performed before the subsequent operations in the
> initial code path are invoked. The callback deletes the cipher request
> data structure which implies that after the invocation of the
> asynchronous cipher operation, this data structure must not be accessed
> any more.
> 
> The setting of the return code size with the request data structure must
> therefore be moved before the invocation of the asynchronous cipher
> operation.
> 
> Fixes: e870456d8e7c ("crypto: algif_skcipher - overhaul memory management")
> Fixes: d887c52d6ae4 ("crypto: algif_aead - overhaul memory management")
> Reported-by: syzbot 
> Cc:  # v4.14+
> Signed-off-by: Stephan Mueller 
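
In outline, the reordering the patch performs (illustrative sketch
condensed from the skcipher path, not the actual diff):

	/* before: the async callback may free areq as soon as the
	 * operation has been submitted */
	err = crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
	areq->outlen = outlen;		/* potential use-after-free */

	/* after: publish the return code size first, then submit */
	areq->outlen = outlen;
	err = crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);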

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH v2] crypto: Add myself as co-maintainer for s5p-sss.c

2017-12-11 Thread Herbert Xu
On Fri, Dec 01, 2017 at 05:51:02PM +0100, Kamil Konieczny wrote:
> Add myself as co-maintainer for Samsung Security SubSystem driver.
> I have added major functionality to the driver [hash acceleration],
> I have access to documentation and to hardware for testing, I can
> also dedicate some of my paid time for reviewing and verifying changes
> to the driver.
> 
> Signed-off-by: Kamil Konieczny 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


[PATCH] crypto: stm32: fix modular build

2017-12-11 Thread Arnd Bergmann
Building the stm32 crypto support as a loadable module causes a build
failure from a simple typo:

drivers/crypto/stm32/stm32-cryp.c:1035:25: error: 'sti_dt_ids' undeclared here (not in a function); did you mean 'stm32_dt_ids'?

This renames the reference to point to the correct symbol.

Fixes: 9e054ec21ef8 ("crypto: stm32 - Support for STM32 CRYP crypto module")
Signed-off-by: Arnd Bergmann 
---
 drivers/crypto/stm32/stm32-cryp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 459755940632..cf1dddbeaa2c 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -1032,7 +1032,7 @@ static const struct of_device_id stm32_dt_ids[] = {
{ .compatible = "st,stm32f756-cryp", },
{},
 };
-MODULE_DEVICE_TABLE(of, sti_dt_ids);
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
 
 static int stm32_cryp_probe(struct platform_device *pdev)
 {
-- 
2.9.0



Re: [PATCH] crypto: stm32: fix module device table name

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 12:04:33PM +0100, Corentin Labbe wrote:
> This patch fix the following build failure:
>   CC [M]  drivers/crypto/stm32/stm32-cryp.o
> In file included from drivers/crypto/stm32/stm32-cryp.c:11:0:
> drivers/crypto/stm32/stm32-cryp.c:1049:25: error: 'sti_dt_ids' undeclared 
> here (not in a function)
>  MODULE_DEVICE_TABLE(of, sti_dt_ids);
> 
> Let's replace sti_dt_ids with stm32_dt_ids which is just declared
> before.
> 
> Signed-off-by: Corentin Labbe 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] chcr: remove unused variables net_device, pi, adap and cntrl

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 02:11:44PM +, Colin King wrote:
> From: Colin Ian King 
> 
> Variables adap, pi and cntrl are assigned but are never read, hence
> they are redundant and can be removed.
> 
> Cleans up various clang build warnings.
> 
> Signed-off-by: Colin Ian King 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] chcr: ensure cntrl is initialized to fix bit-wise or'ing of garabage data

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 02:23:54PM +, Colin King wrote:
> From: Colin Ian King 
> 
> In the case where skb->ip_summed != CHECKSUM_PARTIAL then cntrl contains
> garbage value and this is possibly being bit-wise or'd and stored into
> cpl->ctrl1.  Fix this by initializing cntrl to zero.
> 
> Cleans up clang warning:
> drivers/crypto/chelsio/chcr_ipsec.c:374:9: warning: The left expression
> of the compound assignment is an uninitialized value. The computed value
> will also be garbage
> 
> Fixes: 6dad4e8ab3ec ("chcr: Add support for Inline IPSec")
> Signed-off-by: Colin Ian King 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: cryptd: make cryptd_max_cpu_qlen module parameter static

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 11:26:14AM +, Colin King wrote:
> From: Colin Ian King 
> 
> The cryptd_max_cpu_qlen module parameter is local to the source and does
> not need to be in global scope, so make it static.
> 
> Cleans up sparse warning:
> crypto/cryptd.c:35:14: warning: symbol 'cryptd_max_cpu_qlen' was not
> declared. Should it be static?
> 
> Signed-off-by: Colin Ian King 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: chelsio: make arrays sgl_ent_len and dsgl_ent_len static

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 11:32:08AM +, Colin King wrote:
> From: Colin Ian King 
> 
> The arrays sgl_ent_len and dsgl_ent_len are local to the source and do
> not need to be in global scope, so make them static. Also re-format the
> declarations to match the following round_constant array declaration
> style.
> 
> Cleans up sparse warnings:
> drivers/crypto/chelsio/chcr_algo.c:76:14: warning: symbol 'sgl_ent_len'
> was not declared. Should it be static?
> drivers/crypto/chelsio/chcr_algo.c:81:14: warning: symbol 'dsgl_ent_len'
> was not declared. Should it be static?
> 
> Signed-off-by: Colin Ian King 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: mcryptd: protect the per-CPU queue with a lock

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 01:39:27PM +0100, Sebastian Andrzej Siewior wrote:
> mcryptd_enqueue_request() grabs the per-CPU queue struct and protects
> access to it with disabled preemption. Then it schedules a worker on the
> same CPU. The worker in mcryptd_queue_worker() guards access to the same
> per-CPU variable with disabled preemption.
> 
> If we take CPU-hotplug into account then it is possible that between
> queue_work_on() and the actual invocation of the worker the CPU goes
> down and the worker will be scheduled on _another_ CPU. And here the
> preempt_disable() protection does not work anymore. The easiest thing is
> to add a spin_lock() to guard access to the list.
> 
> Another detail: mcryptd_queue_worker() is not processing more than
> MCRYPTD_BATCH invocations in a row. If there are still items left, then
> it will invoke queue_work() to proceed with more later. *I* would
> suggest to simply drop that check because it does not use a system
> workqueue and the workqueue is already marked as "CPU_INTENSIVE". And if
> preemption is required then the scheduler should do it.
> However if queue_work() is used then the work item is marked as CPU
> unbound. That means it will try to run on the local CPU but it may run
> on another CPU as well. Especially with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y.
> Again, the preempt_disable() won't work here but lock which was
> introduced will help.
> In order to keep work-item on the local CPU (and avoid RR) I changed it
> to queue_work_on().
> 
> Cc: sta...@vger.kernel.org
> Signed-off-by: Sebastian Andrzej Siewior 
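
The core of the fix as described, in outline (sketch; comment and field
placement are illustrative):

struct mcryptd_cpu_queue {
	struct crypto_queue queue;
	spinlock_t q_lock;	/* added: preempt_disable() alone cannot
				 * serialize against a worker that was
				 * migrated to another CPU by hotplug */
	struct work_struct work;
};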

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH v2] crypto: AF_ALG - wait for data at beginning of recvmsg

2017-12-11 Thread Herbert Xu
On Wed, Nov 29, 2017 at 12:02:23PM +0100, Stephan Müller wrote:
> The wait for data is a non-atomic operation that can sleep and therefore
> potentially release the socket lock. The release of the socket lock
> allows another thread to modify the context data structure. The waiting
> operation for new data therefore must be called at the beginning of
> recvmsg. This prevents a race condition where checks of the members of
> the context data structure are performed by recvmsg while there is a
> potential for modification of these values.
> 
> Fixes: e870456d8e7c ("crypto: algif_skcipher - overhaul memory management")
> Fixes: d887c52d6ae4 ("crypto: algif_aead - overhaul memory management")
> Reported-by: syzbot 
> Cc:  # v4.14+
> Signed-off-by: Stephan Mueller 
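
The shape of the fix, as a sketch (using the existing
af_alg_wait_for_data() helper; see the actual patch for the exact
hunks): the sleeping wait moves to the very top of recvmsg, before any
fields of the context are inspected:

	if (!ctx->used) {
		err = af_alg_wait_for_data(sk, flags);
		if (err)
			return err;
	}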

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: skcipher - set walk.iv for zero-length inputs

2017-12-11 Thread Herbert Xu
On Wed, Nov 29, 2017 at 01:18:57AM -0800, Eric Biggers wrote:
> From: Eric Biggers 
> 
> All the ChaCha20 algorithms as well as the ARM bit-sliced AES-XTS
> algorithms call skcipher_walk_virt(), then access the IV (walk.iv)
> before checking whether any bytes need to be processed (walk.nbytes).
> 
> But if the input is empty, then skcipher_walk_virt() doesn't set the IV,
> and the algorithms crash trying to use the uninitialized IV pointer.
> 
> Fix it by setting the IV earlier in skcipher_walk_virt().  Also fix it
> for the AEAD walk functions.
> 
> This isn't a perfect solution because we can't actually align the IV to
> ->cra_alignmask unless there are bytes to process, for one because the
> temporary buffer for the aligned IV is freed by skcipher_walk_done(),
> which is only called when there are bytes to process.  Thus, algorithms
> that require aligned IVs will still need to avoid accessing the IV when
> walk.nbytes == 0.  Still, many algorithms/architectures are fine with
> IVs having any alignment, and even for those that aren't, a misaligned
> pointer bug is much less severe than an uninitialized pointer bug.
> 
> This change also matches the behavior of the older blkcipher_walk API.
> 
> Fixes: 0cabf2af6f5a ("crypto: skcipher - Fix crash on zero-length input")
> Reported-by: syzbot 
> Cc:  # v4.14+
> Signed-off-by: Eric Biggers 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: caam/qi - use correct print specifier for size_t

2017-12-11 Thread Herbert Xu
On Tue, Nov 28, 2017 at 06:48:08PM +0200, Horia Geantă wrote:
> Fix below warnings on ARMv7 by using %zu for printing size_t values:
> 
> drivers/crypto/caam/caamalg_qi.c: In function aead_edesc_alloc:
> drivers/crypto/caam/caamalg_qi.c:417:17: warning: format %lu expects argument 
> of type long unsigned int, but argument 4 has type unsigned int [-Wformat=]
>sizeof(struct qm_sg_entry))
>  ^
> drivers/crypto/caam/caamalg_qi.c:672:16: note: in expansion of macro 
> CAAM_QI_MAX_AEAD_SG
> qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
> ^
> drivers/crypto/caam/caamalg_qi.c: In function ablkcipher_edesc_alloc:
> drivers/crypto/caam/caamalg_qi.c:440:17: warning: format %lu expects argument 
> of type long unsigned int, but argument 4 has type unsigned int [-Wformat=]
>sizeof(struct qm_sg_entry))
>  ^
> drivers/crypto/caam/caamalg_qi.c:909:16: note: in expansion of macro 
> CAAM_QI_MAX_ABLKCIPHER_SG
> qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
> ^
> drivers/crypto/caam/caamalg_qi.c: In function ablkcipher_giv_edesc_alloc:
> drivers/crypto/caam/caamalg_qi.c:440:17: warning: format %lu expects argument 
> of type long unsigned int, but argument 4 has type unsigned int [-Wformat=]
>sizeof(struct qm_sg_entry))
>  ^
> drivers/crypto/caam/caamalg_qi.c:1062:16: note: in expansion of macro 
> CAAM_QI_MAX_ABLKCIPHER_SG
> qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
> ^
> 
> Fixes: eb9ba37dc15a ("crypto: caam/qi - handle large number of S/Gs case")
> Signed-off-by: Horia Geantă 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: arm/aes-neonbs Use PTR_ERR_OR_ZERO()

2017-12-11 Thread Herbert Xu
On Tue, Nov 28, 2017 at 12:06:06AM +0100, Vasyl Gomonovych wrote:
> Fix ptr_ret.cocci warnings:
> arch/arm/crypto/aes-neonbs-glue.c:184:1-3: WARNING: PTR_ERR_OR_ZERO can be used
> arch/arm/crypto/aes-neonbs-glue.c:261:1-3: WARNING: PTR_ERR_OR_ZERO can be used
> 
> Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR
> 
> Generated by: scripts/coccinelle/api/ptr_ret.cocci
> 
> Signed-off-by: Vasyl Gomonovych 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
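
The transformation the script suggests, sketched on a hypothetical init
function ('example_alloc' is a stand-in for any ERR_PTR-returning
allocator, not a real kernel API):

    #include <linux/err.h>

    static int example_init(void)
    {
            void *handle = example_alloc();

            /* Before the conversion:
             *
             *         if (IS_ERR(handle))
             *                 return PTR_ERR(handle);
             *         return 0;
             *
             * After, with identical behaviour: */
            return PTR_ERR_OR_ZERO(handle);
    }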


Re: [PATCH] crypto: fix typo in KPP dependency of CRYPTO_ECDH

2017-12-11 Thread Herbert Xu
On Sun, Nov 26, 2017 at 12:16:46AM +0100, Hauke Mehrtens wrote:
> This fixes a typo in the CRYPTO_KPP dependency of CRYPTO_ECDH.
> 
> Fixes: 3c4b23901a0c ("crypto: ecdh - Add ECDH software support")
> Cc:  # v4.8+
> Signed-off-by: Hauke Mehrtens 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH 6/6] crypto: tcrypt: add multibuf aead speed test

2017-12-11 Thread Herbert Xu
On Thu, Nov 30, 2017 at 10:09:32AM +, Gilad Ben-Yossef wrote:
> The performance of some aead tfm providers is affected by
> the amount of parallelism possible with the processing.
> 
> Introduce an async aead concurrent multi-buffer processing speed
> test so that the performance of such tfm providers can be measured.
> 
> Signed-off-by: Gilad Ben-Yossef 

Sorry, this no longer applies to the current cryptodev tree.

Please respin.

Thanks,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
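
The general shape of such a test, sketched (illustrative only, not the
tcrypt implementation; NUM_MB, reqs[], rets[], completions[] and the
mb_done callback are hypothetical): issue all requests back to back,
then reap completions, so an async tfm provider is free to process them
in parallel.

    /* fire all requests without waiting ... */
    for (i = 0; i < NUM_MB; i++) {
            aead_request_set_callback(reqs[i], CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      mb_done, &completions[i]);
            rets[i] = crypto_aead_encrypt(reqs[i]);
    }

    /* ... then wait for the ones that went asynchronous */
    for (i = 0; i < NUM_MB; i++) {
            if (rets[i] == -EINPROGRESS || rets[i] == -EBUSY)
                    wait_for_completion(&completions[i]);
    }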


[PATCH v4 2/4] crypto: inside-secure - free requests even if their handling failed

2017-12-11 Thread Antoine Tenart
This patch frees the request private data even if its handling failed,
as it would never be freed otherwise.

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..4bcef78a08aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
   &should_complete, &ret);
if (ndesc < 0) {
+   kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
return;
}
-- 
2.14.3



[PATCH v4 1/4] crypto: inside-secure - per request invalidation

2017-12-11 Thread Antoine Tenart
From: Ofer Heifetz 

When an invalidation request is needed we currently override the context
.send and .handle_result helpers. This is wrong as under high load other
requests can already be queued and overriding the context helpers will
make them execute the wrong .send and .handle_result functions.

This commit fixes this by adding a needs_inv flag in the request to
choose the action to perform when sending requests or handling their
results. This flag is set when needed (i.e. when the context flag
is set).

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Ofer Heifetz 
[Antoine: commit message, and removed non related changes from the
original commit]
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_cipher.c | 71 +-
 drivers/crypto/inside-secure/safexcel_hash.c   | 67 +++-
 2 files changed, 111 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..9ea24868d860 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
 
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+   bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
  struct crypto_async_request *async,
  struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
 {
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
 
request->req = &req->base;
-   ctx->base.handle_result = safexcel_handle_result;
 
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
-   ctx->base.needs_inv = false;
-   ctx->base.send = safexcel_aes_send;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+   struct skcipher_request *req = skcipher_request_cast(async);
+   struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+   int err;
+
+   if (sreq->needs_inv) {
+   sreq->needs_inv = false;
+   err = safexcel_handle_inv_result(priv, ring, async,
+should_complete, ret);
+   } else {
+   err = safexcel_handle_req_result(priv, ring, async,
+should_complete, ret);
+   }
+
+   return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
 
-   ctx->base.handle_result = safexcel_handle_inv_result;
-
ret = safexcel_invalidate_cache(async, &ctx->base, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -381,11 +401,29 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+int ring, struct safexcel_request *request,
+int *commands, int *results)
+{
+   struct skcipher_request *req = skcipher_request_cast(async);
+   struct safexcel_cipher_req *sreq 

[PATCH v4 3/4] crypto: inside-secure - fix request allocations in invalidation path

2017-12-11 Thread Antoine Tenart
This patch makes use of the SKCIPHER_REQUEST_ON_STACK and
AHASH_REQUEST_ON_STACK helpers to allocate enough memory to contain both
the crypto request structures and their embedded context (__ctx).

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_cipher.c | 16 
 drivers/crypto/inside-secure/safexcel_hash.c   | 14 +++---
 2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 9ea24868d860..fcc0a606d748 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -422,25 +422,25 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
-   struct skcipher_request req;
-   struct safexcel_cipher_req *sreq = skcipher_request_ctx(&req);
+   SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+   struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
 
-   memset(&req, 0, sizeof(struct skcipher_request));
+   memset(req, 0, sizeof(struct skcipher_request));
 
/* create invalidation request */
init_completion(&result.completion);
-   skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-   safexcel_inv_complete, &result);
+   skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+  safexcel_inv_complete, &result);
 
-   skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-   ctx = crypto_tfm_ctx(req.base.tfm);
+   skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+   ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
sreq->needs_inv = true;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
-   crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+   crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
 
if (!priv->ring[ring].need_dequeue)
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 79fe149804d3..55ff8a340b11 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -450,25 +450,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
-   struct ahash_request req;
-   struct safexcel_ahash_req *rctx = ahash_request_ctx(&req);
+   AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+   struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
 
-   memset(&req, 0, sizeof(struct ahash_request));
+   memset(req, 0, sizeof(struct ahash_request));
 
/* create invalidation request */
init_completion(&result.completion);
-   ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+   ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   safexcel_inv_complete, &result);
 
-   ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-   ctx = crypto_tfm_ctx(req.base.tfm);
+   ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+   ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
rctx->needs_inv = true;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
-   crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+   crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
 
if (!priv->ring[ring].need_dequeue)
-- 
2.14.3
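
For context on why plain on-stack request structs were broken here: the
request context returned by skcipher_request_ctx() lives in memory past
the end of struct skcipher_request, so a bare struct on the stack gives
the driver a context pointer into unrelated stack data. At the time of
writing the helper is defined roughly as follows (paraphrased from
include/crypto/skcipher.h; check the tree you build against):

    #define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
            char __##name##_desc[sizeof(struct skcipher_request) + \
                    crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
            struct skcipher_request *name = (void *)__##name##_desc

i.e. one char buffer sized for the request plus the tfm's reqsize, with
'name' typed as a request pointer into it. AHASH_REQUEST_ON_STACK
follows the same pattern using crypto_ahash_reqsize().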



[PATCH v4 0/4] crypto: inside-secure - set of fixes

2017-12-11 Thread Antoine Tenart
Hi Herbert,

This series is a set of 4 fixes for the Inside Secure SafeXcel crypto
engine driver. The series will be followed by another non-fix one.

This is based on v4.15-rc3.

Thanks,
Antoine

Since v3:
  - Added one patch to only update areq->result on final operations.
  - Fixed two coding style issues in the patches.

Since v2:
  - Removed the patch "only update the result buffer when provided".

Since v1:
  - Removed the crash.txt file which was part of patch 1/4.

Antoine Tenart (3):
  crypto: inside-secure - free requests even if their handling failed
  crypto: inside-secure - fix request allocations in invalidation path
  crypto: inside-secure - do not use areq->result for partial results

Ofer Heifetz (1):
  crypto: inside-secure - per request invalidation

 drivers/crypto/inside-secure/safexcel.c|  1 +
 drivers/crypto/inside-secure/safexcel_cipher.c | 85 ++--
 drivers/crypto/inside-secure/safexcel_hash.c   | 89 ++
 3 files changed, 130 insertions(+), 45 deletions(-)

-- 
2.14.3



[PATCH v4 4/4] crypto: inside-secure - do not use areq->result for partial results

2017-12-11 Thread Antoine Tenart
This patch updates the SafeXcel driver to stop using the crypto
ahash_request result field for partial results (i.e. on updates).
Instead the driver-local safexcel_ahash_req state field is used, and
the ahash_request result buffer is only updated on final operations.

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_hash.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 55ff8a340b11..0c5a5820b06e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -35,7 +35,7 @@ struct safexcel_ahash_req {
bool needs_inv;
 
u8 state_sz;	/* expected state size, only set once */
-   u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+   u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
u64 len;
u64 processed;
@@ -128,7 +128,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-   int cache_len, result_sz = sreq->state_sz;
+   int cache_len;
 
*ret = 0;
 
@@ -149,8 +149,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
spin_unlock_bh(&priv->ring[ring].egress_lock);
 
if (sreq->finish)
-   result_sz = crypto_ahash_digestsize(ahash);
-   memcpy(sreq->state, areq->result, result_sz);
+   memcpy(areq->result, sreq->state,
+  crypto_ahash_digestsize(ahash));
 
dma_unmap_sg(priv->dev, areq->src,
 sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -274,7 +274,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-   ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+   ctx->base.result_dma = dma_map_single(priv->dev, req->state,
  req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL;
-- 
2.14.3



Re: [PATCH 3/4] crypto: inside-secure - only update the result buffer when provided

2017-12-11 Thread Herbert Xu
On Mon, Dec 11, 2017 at 08:49:57AM +0100, Antoine Tenart wrote:
>
> So this patch is indeed fixing an issue, which should probably not be
> there in the first place. I guess you recommend using a buffer local to
> the driver instead, and only update areq->result on completion (final).

That's one way to do it.  Ideally you'd only use the local buffer
for the non-final case so that the final case just does the DMA to
the final destination.

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
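
Sketched out, the suggestion amounts to picking the DMA destination per
request rather than always bouncing through the local buffer (fragment
only; 'finish' and 'state' follow the driver's existing field names,
the rest is hypothetical):

    /* Partial results land in the driver-local state buffer; a final
     * result is DMAed straight into the user-visible areq->result,
     * saving the copy on the last pass. */
    void *dst = sreq->finish ? areq->result : sreq->state;

    ctx->base.result_dma = dma_map_single(priv->dev, dst, sreq->state_sz,
                                          DMA_FROM_DEVICE);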


Re: [PATCH 00/18] crypto: talitos - fixes and performance improvement

2017-12-11 Thread Herbert Xu
On Fri, Dec 08, 2017 at 03:20:40PM +, Horia Geantă wrote:
> On 10/12/2017 6:20 PM, Herbert Xu wrote:
> > On Fri, Oct 06, 2017 at 03:04:31PM +0200, Christophe Leroy wrote:
> >> This serie fixes and improves the talitos crypto driver.
> >>
> >> First 6 patches are fixes for failures reported by the new tests in
> >> the kernel crypto test manager.
> >>
> Looks like these fixes are also required on older 4.9+ -stable kernels.
> (I haven't seen them in the latest 4.9.68-stable mail from Greg, even
> though they are in the main tree.)
> 
> In case you agree, what would be the recommended way to add the patches
> to -stable?

I'll forward them to stable.  Thanks!
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH v3 0/3] crypto: inside-secure - set of fixes

2017-12-11 Thread Antoine Tenart
Hi,

I'll send a v4 soon, as there are two coding style issues in the series.
I'll also include a new patch to only update the areq->result buffer on
final operations (this will fix the issue I was trying to fix with "only
update the result buffer when provided").

Sorry for the noise.

Antoine

On Mon, Dec 11, 2017 at 09:48:05AM +0100, Antoine Tenart wrote:
> Hi Herbert,
> 
> This series is a set of 3 fixes for the Inside Secure SafeXcel crypto
> engine driver. The series will be followed by another non-fix one.
> 
> This is based on v4.15-rc3.
> 
> Thanks,
> Antoine
> 
> Since v2:
>   - Removed the patch "only update the result buffer when provided".
> 
> Since v1:
>   - Removed the crash.txt file which was part of patch 1/4.
> 
> Antoine Tenart (2):
>   crypto: inside-secure - free requests even if their handling failed
>   crypto: inside-secure - fix request allocations in invalidation path
> 
> Ofer Heifetz (1):
>   crypto: inside-secure - per request invalidation
> 
>  drivers/crypto/inside-secure/safexcel.c|  1 +
>  drivers/crypto/inside-secure/safexcel_cipher.c | 83 --
>  drivers/crypto/inside-secure/safexcel_hash.c   | 80 ++---
>  3 files changed, 125 insertions(+), 39 deletions(-)
> 
> -- 
> 2.14.3
> 

-- 
Antoine Ténart, Free Electrons
Embedded Linux and Kernel engineering
http://free-electrons.com


[PATCH v3 1/3] crypto: inside-secure - per request invalidation

2017-12-11 Thread Antoine Tenart
From: Ofer Heifetz 

When an invalidation request is needed we currently override the context
.send and .handle_result helpers. This is wrong as under high load other
requests can already be queued and overriding the context helpers will
make them execute the wrong .send and .handle_result functions.

This commit fixes this by adding a needs_inv flag in the request to
choose the action to perform when sending requests or handling their
results. This flag is set when needed (i.e. when the context flag
is set).

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Ofer Heifetz 
[Antoine: commit message, and removed non related changes from the
original commit]
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_cipher.c | 71 +-
 drivers/crypto/inside-secure/safexcel_hash.c   | 68 +++-
 2 files changed, 112 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..9ea24868d860 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
 
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+   bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
  struct crypto_async_request *async,
  struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
 {
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
 
request->req = &req->base;
-   ctx->base.handle_result = safexcel_handle_result;
 
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
-   ctx->base.needs_inv = false;
-   ctx->base.send = safexcel_aes_send;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+   struct skcipher_request *req = skcipher_request_cast(async);
+   struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+   int err;
+
+   if (sreq->needs_inv) {
+   sreq->needs_inv = false;
+   err = safexcel_handle_inv_result(priv, ring, async,
+should_complete, ret);
+   } else {
+   err = safexcel_handle_req_result(priv, ring, async,
+should_complete, ret);
+   }
+
+   return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
 
-   ctx->base.handle_result = safexcel_handle_inv_result;
-
ret = safexcel_invalidate_cache(async, &ctx->base, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -381,11 +401,29 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+int ring, struct safexcel_request *request,
+int *commands, int *results)
+{
+   struct skcipher_request *req = skcipher_request_cast(async);
+   struct safexcel_cipher_req *sreq 

[PATCH v3 0/3] crypto: inside-secure - set of fixes

2017-12-11 Thread Antoine Tenart
Hi Herbert,

This series is a set of 3 fixes for the Inside Secure SafeXcel crypto
engine driver. The series will be followed by another non-fix one.

This is based on v4.15-rc3.

Thanks,
Antoine

Since v2:
  - Removed the patch "only update the result buffer when provided".

Since v1:
  - Removed the crash.txt file which was part of patch 1/4.

Antoine Tenart (2):
  crypto: inside-secure - free requests even if their handling failed
  crypto: inside-secure - fix request allocations in invalidation path

Ofer Heifetz (1):
  crypto: inside-secure - per request invalidation

 drivers/crypto/inside-secure/safexcel.c|  1 +
 drivers/crypto/inside-secure/safexcel_cipher.c | 83 --
 drivers/crypto/inside-secure/safexcel_hash.c   | 80 ++---
 3 files changed, 125 insertions(+), 39 deletions(-)

-- 
2.14.3



[PATCH v3 2/3] crypto: inside-secure - free requests even if their handling failed

2017-12-11 Thread Antoine Tenart
This patch frees the request private data even if its handling failed,
as it would never be freed otherwise.

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..4bcef78a08aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
   &should_complete, &ret);
if (ndesc < 0) {
+   kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
return;
}
-- 
2.14.3



[PATCH v3 3/3] crypto: inside-secure - fix request allocations in invalidation path

2017-12-11 Thread Antoine Tenart
This patch makes use of the SKCIPHER_REQUEST_ON_STACK and
AHASH_REQUEST_ON_STACK helpers to allocate enough memory to contain both
the crypto request structures and their embedded context (__ctx).

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_cipher.c | 14 +++---
 drivers/crypto/inside-secure/safexcel_hash.c   | 14 +++---
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 9ea24868d860..ef44f5a5a90f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -422,25 +422,25 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
-   struct skcipher_request req;
-   struct safexcel_cipher_req *sreq = skcipher_request_ctx(&req);
+   SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+   struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
 
-   memset(&req, 0, sizeof(struct skcipher_request));
+   memset(req, 0, sizeof(struct skcipher_request));
 
/* create invalidation request */
init_completion(&result.completion);
-   skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+   skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
safexcel_inv_complete, &result);
 
-   skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-   ctx = crypto_tfm_ctx(req.base.tfm);
+   skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+   ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
sreq->needs_inv = true;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
-   crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+   crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
 
if (!priv->ring[ring].need_dequeue)
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 6135c9f5742c..d2e024732988 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -451,25 +451,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
-   struct ahash_request req;
-   struct safexcel_ahash_req *rctx = ahash_request_ctx(&req);
+   AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+   struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
 
-   memset(&req, 0, sizeof(struct ahash_request));
+   memset(req, 0, sizeof(struct ahash_request));
 
/* create invalidation request */
init_completion(&result.completion);
-   ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+   ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   safexcel_inv_complete, &result);
 
-   ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-   ctx = crypto_tfm_ctx(req.base.tfm);
+   ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+   ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
rctx->needs_inv = true;
 
spin_lock_bh(&priv->ring[ring].queue_lock);
-   crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+   crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
 
if (!priv->ring[ring].need_dequeue)
-- 
2.14.3