Re: [PATCH v3 net-next 0/4] kernel TLS

2017-06-15 Thread David Miller
From: Dave Watson 
Date: Wed, 14 Jun 2017 11:36:54 -0700

> This series adds support for kernel TLS encryption over TCP sockets.
> A standard TCP socket is converted to a TLS socket using a setsockopt.
> Only symmetric crypto is done in the kernel, as well as TLS record
> framing.  The handshake remains in userspace, and the negotiated
> cipher keys/iv are provided to the TCP socket.
> 
> We implemented support for this API in OpenSSL 1.1.0; the code is
> available at https://github.com/Mellanox/tls-openssl/tree/master
> 
> It should work with any TLS library with similar modifications;
> a test tool using gnutls is here: https://github.com/Mellanox/tls-af_ktls_tool
> 
> RFC patch to openssl:
> https://mta.openssl.org/pipermail/openssl-dev/2017-June/009384.html
 ...
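
For reference, the userspace flow described above looks roughly like this
(a minimal sketch following the series' documentation; TCP_ULP and SOL_TLS
are the values from the uapi headers added by the series, and the key
material is whatever the TLS handshake negotiated):

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* from the series' uapi headers */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Convert a connected TCP socket to a kernel TLS TX socket. */
static int enable_ktls_tx(int sock,
			  const unsigned char key[16], const unsigned char salt[4],
			  const unsigned char iv[8], const unsigned char rec_seq[8])
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* Attach the "tls" ULP, then hand the negotiated TX keys to the
	 * socket; subsequent send()s are framed and encrypted as TLS records. */
	if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}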

Series applied, thanks for all of this hard work!


Re: [PATCH v2 1/6] integrity: Small code improvements

2017-06-15 Thread Mimi Zohar
On Wed, 2017-06-07 at 22:49 -0300, Thiago Jung Bauermann wrote:
> These changes are too small to warrant their own patches:
> 
> The keyid and sig_size members of struct signature_v2_hdr are in BE format,
> so use a type that makes this assumption explicit. Also, use beXX_to_cpu
> instead of __beXX_to_cpu to read them.
> 
> Change integrity_kernel_read to take a void * buffer instead of char *
> buffer, so that callers don't have to use a cast if they provide a buffer
> that isn't a char *.
> 
> Add missing #endif comment in ima.h pointing out which macro it refers to.
> 
> Add missing fall through comment in ima_appraise.c.
> 
> Constify mask_tokens and func_tokens arrays.
> 
> Signed-off-by: Thiago Jung Bauermann 

Thank you.  Queued to be upstreamed.

Mimi


> ---
>  security/integrity/digsig_asymmetric.c | 4 ++--
>  security/integrity/iint.c  | 2 +-
>  security/integrity/ima/ima.h   | 2 +-
>  security/integrity/ima/ima_appraise.c  | 1 +
>  security/integrity/ima/ima_policy.c| 4 ++--
>  security/integrity/integrity.h | 7 ---
>  6 files changed, 11 insertions(+), 9 deletions(-)
> 
> diff --git a/security/integrity/digsig_asymmetric.c 
> b/security/integrity/digsig_asymmetric.c
> index 80052ed8d467..ab6a029062a1 100644
> --- a/security/integrity/digsig_asymmetric.c
> +++ b/security/integrity/digsig_asymmetric.c
> @@ -92,13 +92,13 @@ int asymmetric_verify(struct key *keyring, const char 
> *sig,
> 
>   siglen -= sizeof(*hdr);
> 
> - if (siglen != __be16_to_cpu(hdr->sig_size))
> + if (siglen != be16_to_cpu(hdr->sig_size))
>   return -EBADMSG;
> 
>   if (hdr->hash_algo >= HASH_ALGO__LAST)
>   return -ENOPKG;
> 
> - key = request_asymmetric_key(keyring, __be32_to_cpu(hdr->keyid));
> + key = request_asymmetric_key(keyring, be32_to_cpu(hdr->keyid));
>   if (IS_ERR(key))
>   return PTR_ERR(key);
> 
> diff --git a/security/integrity/iint.c b/security/integrity/iint.c
> index c710d22042f9..6fc888ca468e 100644
> --- a/security/integrity/iint.c
> +++ b/security/integrity/iint.c
> @@ -182,7 +182,7 @@ security_initcall(integrity_iintcache_init);
>   *
>   */
>  int integrity_kernel_read(struct file *file, loff_t offset,
> -   char *addr, unsigned long count)
> +   void *addr, unsigned long count)
>  {
>   mm_segment_t old_fs;
>   char __user *buf = (char __user *)addr;
> diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
> index d26a30e37d13..215a93c41b51 100644
> --- a/security/integrity/ima/ima.h
> +++ b/security/integrity/ima/ima.h
> @@ -284,7 +284,7 @@ static inline int ima_read_xattr(struct dentry *dentry,
>   return 0;
>  }
> 
> -#endif
> +#endif /* CONFIG_IMA_APPRAISE */
> 
>  /* LSM based policy rules require audit */
>  #ifdef CONFIG_IMA_LSM_RULES
> diff --git a/security/integrity/ima/ima_appraise.c 
> b/security/integrity/ima/ima_appraise.c
> index 7fe0566142d8..ea36a4f134f4 100644
> --- a/security/integrity/ima/ima_appraise.c
> +++ b/security/integrity/ima/ima_appraise.c
> @@ -240,6 +240,7 @@ int ima_appraise_measurement(enum ima_hooks func,
>   case IMA_XATTR_DIGEST_NG:
>   /* first byte contains algorithm id */
>   hash_start = 1;
> + /* fall through */
>   case IMA_XATTR_DIGEST:
>   if (iint->flags & IMA_DIGSIG_REQUIRED) {
>   cause = "IMA-signature-required";
> diff --git a/security/integrity/ima/ima_policy.c 
> b/security/integrity/ima/ima_policy.c
> index 0acd68decb17..949ad3858327 100644
> --- a/security/integrity/ima/ima_policy.c
> +++ b/security/integrity/ima/ima_policy.c
> @@ -965,7 +965,7 @@ enum {
>   mask_exec = 0, mask_write, mask_read, mask_append
>  };
> 
> -static char *mask_tokens[] = {
> +static const char *const mask_tokens[] = {
>   "MAY_EXEC",
>   "MAY_WRITE",
>   "MAY_READ",
> @@ -979,7 +979,7 @@ enum {
>   func_policy
>  };
> 
> -static char *func_tokens[] = {
> +static const char *const func_tokens[] = {
>   "FILE_CHECK",
>   "MMAP_CHECK",
>   "BPRM_CHECK",
> diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
> index 24520b4ef3b0..a53e7e4ab06c 100644
> --- a/security/integrity/integrity.h
> +++ b/security/integrity/integrity.h
> @@ -92,8 +92,8 @@ struct signature_v2_hdr {
>   uint8_t type;   /* xattr type */
>   uint8_t version;/* signature format version */
>   uint8_t hash_algo;  /* Digest algorithm [enum hash_algo] */
> - uint32_t keyid; /* IMA key identifier - not X509/PGP specific */
> - uint16_t sig_size;  /* signature size */
> + __be32 keyid;   /* IMA key identifier - not X509/PGP specific */
> + __be16 sig_size;/* signature size */
>   uint8_t sig[0]; /* signature payload */
>  } __packed;
> 
> @@ -118,7 +118,8 @@ struct integrity_iint_cache {
>  struct 

Re: [PATCH v2 3/6] ima: Log the same audit cause whenever a file has no signature

2017-06-15 Thread Mimi Zohar
On Wed, 2017-06-07 at 22:49 -0300, Thiago Jung Bauermann wrote:
> If the file doesn't have an xattr, ima_appraise_measurement sets cause to
> "missing-hash" while if there's an xattr but it's a digest instead of a
> signature it sets cause to "IMA-signature-required".
> 
> Fix it by setting cause to "IMA-signature-required" in both cases.
> 
> Signed-off-by: Thiago Jung Bauermann 

Thank you.  Queued to be upstreamed.

Mimi
> ---
>  security/integrity/ima/ima_appraise.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/security/integrity/ima/ima_appraise.c 
> b/security/integrity/ima/ima_appraise.c
> index ea36a4f134f4..809ba70fbbbf 100644
> --- a/security/integrity/ima/ima_appraise.c
> +++ b/security/integrity/ima/ima_appraise.c
> @@ -217,7 +217,8 @@ int ima_appraise_measurement(enum ima_hooks func,
>   if (rc && rc != -ENODATA)
>   goto out;
> 
> - cause = "missing-hash";
> + cause = iint->flags & IMA_DIGSIG_REQUIRED ?
> + "IMA-signature-required" : "missing-hash";
>   status = INTEGRITY_NOLABEL;
>   if (opened & FILE_CREATED)
>   iint->flags |= IMA_NEW_FILE;



Re: [PATCH v2 2/6] ima: Simplify policy_func_show.

2017-06-15 Thread Mimi Zohar
On Wed, 2017-06-07 at 22:49 -0300, Thiago Jung Bauermann wrote:
> If the func_tokens array uses the same indices as enum ima_hooks,
> policy_func_show can be a lot simpler, and the func_* enum becomes
> unnecessary.
> 
> Also, if we use the same macro trick used by kernel_read_file_id_str we can
> use one hooks list for both the enum and the string array, making sure they
> are always in sync (suggested by Mimi Zohar).
> 
> Finally, by using the printf pattern for the function token directly
> instead of using the pt macro we can simplify policy_func_show even further
> and avoid needing a temporary buffer.
> 
> Signed-off-by: Thiago Jung Bauermann 

Thank you.  Queued to be upstreamed.

Mimi
> ---
>  security/integrity/ima/ima.h| 25 +---
>  security/integrity/ima/ima_policy.c | 58 
> -
>  2 files changed, 21 insertions(+), 62 deletions(-)
> 
> diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
> index 215a93c41b51..d52b487ad259 100644
> --- a/security/integrity/ima/ima.h
> +++ b/security/integrity/ima/ima.h
> @@ -172,17 +172,22 @@ static inline unsigned long ima_hash_key(u8 *digest)
>   return hash_long(*digest, IMA_HASH_BITS);
>  }
> 
> +#define __ima_hooks(hook)\
> + hook(NONE)  \
> + hook(FILE_CHECK)\
> + hook(MMAP_CHECK)\
> + hook(BPRM_CHECK)\
> + hook(POST_SETATTR)  \
> + hook(MODULE_CHECK)  \
> + hook(FIRMWARE_CHECK)\
> + hook(KEXEC_KERNEL_CHECK)\
> + hook(KEXEC_INITRAMFS_CHECK) \
> + hook(POLICY_CHECK)  \
> + hook(MAX_CHECK)
> +#define __ima_hook_enumify(ENUM) ENUM,
> +
>  enum ima_hooks {
> - FILE_CHECK = 1,
> - MMAP_CHECK,
> - BPRM_CHECK,
> - POST_SETATTR,
> - MODULE_CHECK,
> - FIRMWARE_CHECK,
> - KEXEC_KERNEL_CHECK,
> - KEXEC_INITRAMFS_CHECK,
> - POLICY_CHECK,
> - MAX_CHECK
> + __ima_hooks(__ima_hook_enumify)
>  };
> 
>  /* LIM API function definitions */
> diff --git a/security/integrity/ima/ima_policy.c 
> b/security/integrity/ima/ima_policy.c
> index 949ad3858327..f4436626ccb7 100644
> --- a/security/integrity/ima/ima_policy.c
> +++ b/security/integrity/ima/ima_policy.c
> @@ -972,23 +972,10 @@ static const char *const mask_tokens[] = {
>   "MAY_APPEND"
>  };
> 
> -enum {
> - func_file = 0, func_mmap, func_bprm,
> - func_module, func_firmware, func_post,
> - func_kexec_kernel, func_kexec_initramfs,
> - func_policy
> -};
> +#define __ima_hook_stringify(str)(#str),
> 
>  static const char *const func_tokens[] = {
> - "FILE_CHECK",
> - "MMAP_CHECK",
> - "BPRM_CHECK",
> - "MODULE_CHECK",
> - "FIRMWARE_CHECK",
> - "POST_SETATTR",
> - "KEXEC_KERNEL_CHECK",
> - "KEXEC_INITRAMFS_CHECK",
> - "POLICY_CHECK"
> + __ima_hooks(__ima_hook_stringify)
>  };
> 
>  void *ima_policy_start(struct seq_file *m, loff_t *pos)
> @@ -1025,49 +1012,16 @@ void ima_policy_stop(struct seq_file *m, void *v)
> 
>  #define pt(token)policy_tokens[token + Opt_err].pattern
>  #define mt(token)mask_tokens[token]
> -#define ft(token)func_tokens[token]
> 
>  /*
>   * policy_func_show - display the ima_hooks policy rule
>   */
>  static void policy_func_show(struct seq_file *m, enum ima_hooks func)
>  {
> - char tbuf[64] = {0,};
> -
> - switch (func) {
> - case FILE_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_file));
> - break;
> - case MMAP_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_mmap));
> - break;
> - case BPRM_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_bprm));
> - break;
> - case MODULE_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_module));
> - break;
> - case FIRMWARE_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_firmware));
> - break;
> - case POST_SETATTR:
> - seq_printf(m, pt(Opt_func), ft(func_post));
> - break;
> - case KEXEC_KERNEL_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_kexec_kernel));
> - break;
> - case KEXEC_INITRAMFS_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_kexec_initramfs));
> - break;
> - case POLICY_CHECK:
> - seq_printf(m, pt(Opt_func), ft(func_policy));
> - break;
> - default:
> - snprintf(tbuf, sizeof(tbuf), "%d", func);
> - seq_printf(m, pt(Opt_func), tbuf);
> - break;
> - }
> - seq_puts(m, " ");
> + if (func > 0 && func < MAX_CHECK)
> + seq_printf(m, "func=%s ", func_tokens[func]);
> + else
> + seq_printf(m, "func=%d ", func);
>  }
> 
>  int ima_policy_show(struct seq_file *m, void *v)



Re: [PATCH] crypto: n2: make of_device_ids const

2017-06-15 Thread David Miller
From: Arvind Yadav 
Date: Thu, 15 Jun 2017 17:28:10 +0530

> of_device_ids are not supposed to change at runtime. All functions
> working with of_device_ids provided by  work with const
> of_device_ids. So mark the non-const structs as const.
> 
> File size before:
>    text  data  bss   dec  hex  filename
>    2001  2168   48  4217 1079  drivers/crypto/n2_core.o
> 
> File size after constifying n2_crypto_match and n2_mau_match:
>    text  data  bss   dec  hex  filename
>    3601   536   48  4185 1059  drivers/crypto/n2_core.o
> 
> Signed-off-by: Arvind Yadav 

Acked-by: David S. Miller 


Re: Crypto Fixes for 4.12

2017-06-15 Thread David Miller
From: Linus Torvalds 
Date: Thu, 15 Jun 2017 18:04:44 +0900

> There's a fair number of SHASH_DESC_ON_STACK users, are all the others
> safe for some random reason that just happens to be about code
> generation? Did people actually verify that?

I looked at the code generated in every case.

As a side note, ext4 does something similar with a private
implementation, but it doesn't use something that evaluates to an
alloca.  Instead it uses a fixed 4-byte size for the shash context
value in the on-stack declaration.

We can tidy it up with abstraction macros as a follow-up, thanks
for the suggestion.  I'll look into it.
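
For reference, the ext4 pattern mentioned above looks like this (lightly
adapted from ext4_chksum() in fs/ext4/ext4.h, assuming <crypto/hash.h>;
since crc32c's descsize is a known constant, the on-stack context has a
fixed size rather than an alloca):

#include <crypto/hash.h>

static u32 ext4_style_chksum(struct crypto_shash *tfm, u32 crc,
			     const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];	/* fixed size: crc32c's descsize is 4 */
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(tfm) != sizeof(desc.ctx));

	desc.shash.tfm = tfm;
	desc.shash.flags = 0;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}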


Re: Crypto Fixes for 4.12

2017-06-15 Thread David Miller
From: Herbert Xu 
Date: Thu, 15 Jun 2017 17:42:10 +0800

> On Thu, Jun 15, 2017 at 06:04:44PM +0900, Linus Torvalds wrote:
>> There's a fair number of SHASH_DESC_ON_STACK users, are all the others
>> safe for some random reason that just happens to be about code
>> generation? Did people actually verify that?
> 
> If I understand this correctly this is only an issue if you directly
> return a value from the shash_desc struct allocated on the stack.
> This is usually rare as normally you'd return an error code and the
> hash result would be written directly to some memory passed in from
> the caller.

Correct.
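
A minimal sketch of the safe pattern being described (the helper name is
illustrative): the digest is written into a caller-provided buffer and
only an error code is returned, so nothing that lives inside the
on-stack descriptor escapes the function.

#include <crypto/hash.h>

static int example_shash_digest(struct crypto_shash *tfm, const u8 *data,
				unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	desc->flags = 0;

	/* The result goes to 'out'; only the error code is returned by value. */
	err = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);
	return err;
}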


Re: [RFC PATCH 0/2] crypto: caam - fix cts(cbc(aes)) with CAAM driver

2017-06-15 Thread Horia Geantă
On 6/2/2017 3:25 PM, David Gstir wrote:
> Hi!
> 
> While testing fscrypt's filename encryption, I noticed that the implementation
> of cts(cbc(aes)) is broken when the CAAM hardware crypto driver is enabled.
> Some digging showed that the refactoring of crypto/cts.c in v4.8 
> (commit 0605c41cc53ca) exposed some problems with CAAM's aes-cbc
> implementation. This can be tested quite easily by loading the tcrypt
> module with mode=38. Looks like the cts mode is not used very often
> and this has gone unnoticed since 4.8...
> 
> This patch series is an attempt to fix that and get cts(cbc(aes)) working
> properly again when the CAAM driver is enabled.
> 
David, thanks for taking time to fix these.

> Specifically, the issues are:
> 
> 1) The cts implementation assumes ->info of ablkcipher_request (or ->iv of
>skcipher_request respectively) to be set to the last ciphertext block when
>the aes-cbc encryption is finished. Meaning that ->info is changed by the
>aes-cbc code. The CAAM driver does not do that and leaves ->info as-is.
> 
>    It is not fully clear to me yet whether this is a requirement of the
>    crypto API, or just a side effect that is exploited by the cts
>    implementation.
> 
>For now, I assumed it is a requirement of the crypto API since I've seen
>other crypto drivers (e.g. AMD's CCP) do that. So the patch fixes the CAAM
>crypto driver accordingly.
> 
IIUC, yes, the Crypto API requires ->info to be updated.
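
Concretely, what cts relies on is that after CBC encryption the IV
buffer is left holding the last ciphertext block, so the next operation
can chain from it. A sketch of that convention for the ablkcipher API
(illustrative only, not the actual CAAM fix; assumes the AES block size):

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

static void cbc_copy_iv_out(struct ablkcipher_request *req)
{
	/* Copy the final ciphertext block from req->dst back into the
	 * IV buffer (req->info) once encryption has completed. */
	scatterwalk_map_and_copy(req->info, req->dst,
				 req->nbytes - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
}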

Dan, Radu,

IIRC, you've hit something similar while testing CAAM on i.MX.
Could you please review David's fix and compare it with yours, so that
in the end we can choose the one that fits best?

>    Also, the aead code in the CAAM driver (more specifically the gcm mode
>    code) seems to have the same flaw, so it'll need a similar fix in that case.
> 
> 
> 2) The cts implementation uses aes-cbc twice to perform its job. The second
>time, it is called from within the callback of the first call to aes-cbc.
>This usage is not properly handled in the CAAM driver which triggers the
>BUG below.
> 
Dan, Radu - have you seen this on i.MX?

>My current proposal is to use in_interrupt() to detect such cases and set
>the k*alloc flags accordingly. However, since using in_interrupt() is not
>really nice, I'm wondering if there is a better way to handle this?
> 
Nothing else crosses my mind right now, but I'd like to sleep on it.
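
For comparison, one existing idiom avoids in_interrupt() by deriving the
allocation flags from the request's own MAY_SLEEP flag, as several
drivers do (the safexcel patches later in this digest use the same trick
via EIP197_GFP_FLAGS). A sketch of the pattern, with a hypothetical
helper name, not the actual CAAM fix:

#include <linux/slab.h>
#include <linux/crypto.h>

static void *edesc_alloc(struct ablkcipher_request *req, size_t bytes)
{
	/* If the caller allowed sleeping, use GFP_KERNEL; otherwise we may
	 * be in softirq context (e.g. a completion callback), so GFP_ATOMIC. */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;

	return kzalloc(bytes, flags);
}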

> 
> Thanks,
> David
> 
> 
> [  126.252543] BUG: sleeping function called from invalid context at 
> mm/slab.h:432
> [  126.260099] in_atomic(): 1, irqs_disabled(): 0, pid: 0, name: swapper/0
> [  126.266837] no locks held by swapper/0/0.
> [  126.270969] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 
> 4.12.0-rc3-00052-g0ddec680d395 #287
> [  126.279226] Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree)
> [  126.285821] Backtrace:
> [  126.288406] [] (dump_backtrace) from [] 
> (show_stack+0x18/0x1c)
> [  126.296057]  r7: r6:6113 r5: r4:c102ab48
> [  126.301798] [] (show_stack) from [] 
> (dump_stack+0xb4/0xe8)
> [  126.309106] [] (dump_stack) from [] 
> (___might_sleep+0x17c/0x2a0)
> [  126.316929]  r9: r8:c0a016dc r7:c101ee3e r6:01b0 r5:c0c12fac 
> r4:e000
> [  126.324751] [] (___might_sleep) from [] 
> (__might_sleep+0x64/0xa4)
> [  126.332655]  r7:014080c1 r6: r5:01b0 r4:c0c12fac
> [  126.338397] [] (__might_sleep) from [] 
> (__kmalloc+0x130/0x1b8)
> [  126.346039]  r6:c071a8ec r5:014080c1 r4:eec01e00
> [  126.350742] [] (__kmalloc) from [] 
> (ablkcipher_edesc_alloc.constprop.1+0x200/0x900)
> [  126.360213]  r10: r9: r8:c0a016dc r7:c0a016dc r6:ee05ac10 
> r5:edfdaec0
> [  126.368109]  r4:0001 r3:0020
> [  126.371765] [] (ablkcipher_edesc_alloc.constprop.1) from 
> [] (ablkcipher_encrypt+0x24/0x9c)
> [  126.381843]  r10: r9:0020 r8:0001 r7:ee05ac10 r6:ed597000 
> r5:edfdaec0
> [  126.389738]  r4:ed475d08
> [  126.392354] [] (ablkcipher_encrypt) from [] 
> (skcipher_encrypt_ablkcipher+0x68/0x6c)
> [  126.401822]  r7:ed475d08 r6:ed597000 r5:0400 r4:ed475d08
> [  126.407566] [] (skcipher_encrypt_ablkcipher) from [] 
> (cts_cbc_encrypt+0x118/0x124)
> [  126.416947]  r7:ed475d08 r6:c1001cc0 r5:0010 r4:edfdae00
> [  126.422686] [] (cts_cbc_encrypt) from [] 
> (crypto_cts_encrypt_done+0x20/0x54)
> [  126.431548]  r10: r9:ee05ac10 r8: r7:0010 r6:edc8e6c0 
> r5:edc8e6d8
> [  126.439443]  r4:edfdae00
> [  126.442056] [] (crypto_cts_encrypt_done) from [] 
> (ablkcipher_encrypt_done+0x88/0x9c)
> [  126.445180] fec 2188000.ethernet eth0: MDIO read timeout
> [  126.456948]  r5:edc8e6d8 r4:edfdaec0
> [  126.460604] [] (ablkcipher_encrypt_done) from [] 
> (caam_jr_dequeue+0x214/0x2d4)
> [  126.469639]  r9:0001 r8:ee088010 r7:01ff r6:0001 r5: 
> r4:edfdaec0
> [  126.477467] [] (caam_jr_dequeue) from [] 
> (tasklet_action+0x98/0x154)
> [  126.485160] fec 2188000.ethernet eth0: MDIO read timeout
> [  

Re: [kernel-hardening] Re: [PATCH v4 13/13] random: warn when kernel uses unseeded randomness

2017-06-15 Thread Stephan Müller
On Thursday, 15 June 2017 at 13:03:48 CEST, Michael Ellerman wrote:

Hi Michael,

> 
> Even with this patch, it's still pretty spammy (today's linux-next):
> 

I would think that the logging issue is relevant only for cryptographic use 
cases, or use cases requiring strong random numbers. Only those use cases 
should eventually be fixed to wait for a fully seeded DRNG.

The logged messages you present here indicate use cases where no strong 
security is required. It looks like the logs show ASLR-related uses of 
random numbers. Those do not require a fully seeded ChaCha20 DRNG.

IMHO, users of get_random_u64 or get_random_u32 are use cases that do not 
require a fully seeded DRNG and thus do not need cryptographically strong 
random numbers. Hence, I would think that the logging should be removed from 
get_random_u32/u64.

Yet, logging should remain for get_random_bytes, which should be designated 
as the interface for use cases where cryptographically strong random numbers 
are required.

Ciao
Stephan



[PATCH] crypto: n2: make of_device_ids const

2017-06-15 Thread Arvind Yadav
of_device_ids are not supposed to change at runtime. All functions
working with of_device_ids provided by  work with const
of_device_ids. So mark the non-const structs as const.

File size before:
   text    data  bss    dec   hex filename
   2001    2168   48   4217  1079 drivers/crypto/n2_core.o

File size after constifying n2_crypto_match and n2_mau_match:
   text    data  bss    dec   hex filename
   3601     536   48   4185  1059 drivers/crypto/n2_core.o

Signed-off-by: Arvind Yadav 
---
 drivers/crypto/n2_core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 4ecb77a..2694513 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -2169,7 +2169,7 @@ static int n2_mau_remove(struct platform_device *dev)
return 0;
 }
 
-static struct of_device_id n2_crypto_match[] = {
+static const struct of_device_id n2_crypto_match[] = {
{
.name = "n2cp",
.compatible = "SUNW,n2-cwq",
@@ -2196,7 +2196,7 @@ static int n2_mau_remove(struct platform_device *dev)
.remove =   n2_crypto_remove,
 };
 
-static struct of_device_id n2_mau_match[] = {
+static const struct of_device_id n2_mau_match[] = {
{
.name = "ncp",
.compatible = "SUNW,n2-mau",
-- 
1.9.1



Re: [kernel-hardening] Re: [PATCH v4 13/13] random: warn when kernel uses unseeded randomness

2017-06-15 Thread Michael Ellerman
Theodore Ts'o  writes:
> On Tue, Jun 06, 2017 at 07:48:04PM +0200, Jason A. Donenfeld wrote:
>> This enables an important dmesg notification about when drivers have
>> used the crng without it being seeded first. Prior, these errors would
>> occur silently, and so there hasn't been a great way of diagnosing these
>> types of bugs for obscure setups. By adding this as a config option, we
>> can leave it on by default, so that we learn where these issues happen,
>> in the field, will still allowing some people to turn it off, if they
>> really know what they're doing and do not want the log entries.
...
>
> This patch is pretty spammy.  On my KVM test kernel:
>
> random: bucket_table_alloc+0x15f/0x190 get_random_u32 called with crng_init=0
> random: bucket_table_alloc+0x15f/0x190 get_random_u32 called with crng_init=0
> random: bucket_table_alloc+0x15f/0x190 get_random_u32 called with crng_init=0
> random: bucket_table_alloc+0x15f/0x190 get_random_u32 called with crng_init=0
...
>
> At the very least we probably should do a logical "uniq" on the output
> (e.g., if we have complained about the previous callsite, don't whinge
> about it again).
>
> commit 9d9035bc6d7871a73d7f9aada4e63cb190874a68
> Author: Theodore Ts'o 
> Date:   Thu Jun 8 04:16:59 2017 -0400
>
> random: suppress duplicate crng_init=0 warnings
> 
> Suppress duplicate CONFIG_WARN_UNSEEDED_RANDOM warnings to avoid
> spamming dmesg.
> 
> Signed-off-by: Theodore Ts'o 

Even with this patch, it's still pretty spammy (today's linux-next):

 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 Initializing random number generator... random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0
 random: arch_mmap_rnd+0x78/0xb0 get_random_u64 called with crng_init=0
 random: load_elf_binary+0x57c/0x1550 get_random_u64 called with crng_init=0
 random: arch_randomize_brk+0xa4/0xd0 get_random_u64 called with crng_init=0


Do I need to be doing anything to fix these? (this is on powerpc)

cheers


[PATCH 11/13] crypto: inside-secure - only dequeue when needed

2017-06-15 Thread Antoine Tenart
This forces the need_dequeue flag to be unset whenever the dequeue
function is called, to avoid calling it again when it is not necessary.

Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 8f195e031938..e7f87ac12685 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -429,6 +429,8 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
    struct safexcel_request *request;
    int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
+   priv->ring[ring].need_dequeue = false;
+
    do {
        spin_lock_bh(&priv->ring[ring].queue_lock);
        backlog = crypto_get_backlog(&priv->ring[ring].queue);
@@ -631,10 +633,8 @@ static void safexcel_handle_result_work(struct work_struct *work)
 
safexcel_handle_result_descriptor(priv, data->ring);
 
-   if (priv->ring[data->ring].need_dequeue) {
-   priv->ring[data->ring].need_dequeue = false;
+   if (priv->ring[data->ring].need_dequeue)
safexcel_dequeue(data->priv, data->ring);
-   }
 }
 
 struct safexcel_ring_irq_data {
-- 
2.9.4



[PATCH 05/13] crypto: inside-secure - optimize DSE bufferability control

2017-06-15 Thread Antoine Tenart
From: Igal Liberman 

Configure the data write bufferability to always buffer packets in the
DSE. This change slightly improves performance.

Signed-off-by: Igal Liberman 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 1 +
 drivers/crypto/inside-secure/safexcel.h | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/crypto/inside-secure/safexcel.c 
b/drivers/crypto/inside-secure/safexcel.c
index 658b307c6a11..73f4ef8d71f3 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -329,6 +329,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv 
*priv)
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | 
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+   val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
writel(val, priv->base + EIP197_HIA_DSE_CFG);
 
diff --git a/drivers/crypto/inside-secure/safexcel.h 
b/drivers/crypto/inside-secure/safexcel.h
index b8a81c568c99..7e3cbb9ac98e 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -139,6 +139,7 @@
 #define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n)((n) << 0)
 #define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n)  (((n) & 0x7) << 4)
 #define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n)((n) << 8)
+#define EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE  GENMASK(15, 14)
 #define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n)((n) << 16)
 #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n)  (((n) & 0x7) << 20)
 #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n)((n) << 24)
-- 
2.9.4



[PATCH 07/13] crypto: inside-secure - update the context and request later

2017-06-15 Thread Antoine Tenart
This moves the context and request updates to the end of the cipher and
hash send() functions. This way the context and request fields are set
only when everything else has succeeded in the send() functions.

Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_cipher.c | 7 +++
 drivers/crypto/inside-secure/safexcel_hash.c   | 8 
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c 
b/drivers/crypto/inside-secure/safexcel_cipher.c
index 8eea4d30db31..6037cdfc1f16 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -190,8 +190,6 @@ static int safexcel_aes_send(struct crypto_async_request 
*async,
int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
int i, ret = 0;
 
-   request->req = &req->base;
-
if (req->src == req->dst) {
nr_src = dma_map_sg(priv->dev, req->src,
sg_nents_for_len(req->src, req->cryptlen),
@@ -264,10 +262,11 @@ static int safexcel_aes_send(struct crypto_async_request 
*async,
n_rdesc++;
}
 
-   ctx->base.handle_result = safexcel_handle_result;
-
    spin_unlock_bh(&priv->ring[ring].egress_lock);
 
+   request->req = &req->base;
+   ctx->base.handle_result = safexcel_handle_result;
+
*commands = n_cdesc;
*results = n_rdesc;
return 0;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c 
b/drivers/crypto/inside-secure/safexcel_hash.c
index 6eee1a502225..4e526372464f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -198,9 +198,6 @@ static int safexcel_ahash_send(struct crypto_async_request 
*async, int ring,
len -= extra;
}
 
-   request->req = &areq->base;
-   ctx->base.handle_result = safexcel_handle_result;
-
    spin_lock_bh(&priv->ring[ring].egress_lock);
 
/* Add a command descriptor for the cached data, if any */
@@ -291,9 +288,12 @@ static int safexcel_ahash_send(struct crypto_async_request 
*async, int ring,
goto cdesc_rollback;
}
 
-   req->processed += len;
    spin_unlock_bh(&priv->ring[ring].egress_lock);
 
+   req->processed += len;
+   request->req = &areq->base;
+   ctx->base.handle_result = safexcel_handle_result;
+
*commands = n_cdesc;
*results = 1;
return 0;
-- 
2.9.4



[PATCH 03/13] crypto: inside-secure - fix incorrect DSE data cache setting

2017-06-15 Thread Antoine Tenart
Set the correct value for the DSE data cache, using WR_CACHE_3BITS
instead of RD_CACHE_3BITS. This fixes an incorrect setting and helps
improve performance.

Reported-by: Igal Liberman 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/inside-secure/safexcel.c 
b/drivers/crypto/inside-secure/safexcel.c
index 5485e925e18d..99755fc1a161 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -328,7 +328,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv 
*priv)
/* DMA transfer size to use */
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | 
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
-   val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+   val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
writel(val, priv->base + EIP197_HIA_DSE_CFG);
 
/* Leave the DSE threads reset state */
-- 
2.9.4



[PATCH 08/13] crypto: inside-secure - use one queue per hw ring

2017-06-15 Thread Antoine Tenart
Update the inside-secure safexcel driver from using one global queue to
using one queue per hw ring. This eases request management and keeps the
hw in sync with what's done in sw.

Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c| 86 --
 drivers/crypto/inside-secure/safexcel.h| 12 ++--
 drivers/crypto/inside-secure/safexcel_cipher.c | 38 +++-
 drivers/crypto/inside-secure/safexcel_hash.c   | 38 +++-
 4 files changed, 89 insertions(+), 85 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.c 
b/drivers/crypto/inside-secure/safexcel.c
index 73f4ef8d71f3..8956b23803a8 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -422,20 +422,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv 
*priv)
return 0;
 }
 
-void safexcel_dequeue(struct safexcel_crypto_priv *priv)
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
struct safexcel_request *request;
-   int i, ret, n = 0, nreq[EIP197_MAX_RINGS] = {0};
-   int cdesc[EIP197_MAX_RINGS] = {0}, rdesc[EIP197_MAX_RINGS] = {0};
-   int commands, results;
+   int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
do {
-   spin_lock_bh(&priv->lock);
-   req = crypto_dequeue_request(&priv->queue);
-   backlog = crypto_get_backlog(&priv->queue);
-   spin_unlock_bh(&priv->lock);
+   spin_lock_bh(&priv->ring[ring].queue_lock);
+   req = crypto_dequeue_request(&priv->ring[ring].queue);
+   backlog = crypto_get_backlog(&priv->ring[ring].queue);
+   spin_unlock_bh(&priv->ring[ring].queue_lock);
 
if (!req)
goto finalize;
@@ -445,58 +443,51 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv)
goto requeue;
 
ctx = crypto_tfm_ctx(req->tfm);
-   ret = ctx->send(req, ctx->ring, request, &commands, &results);
+   ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
 requeue:
-   spin_lock_bh(&priv->lock);
-   crypto_enqueue_request(&priv->queue, req);
-   spin_unlock_bh(&priv->lock);
+   spin_lock_bh(&priv->ring[ring].queue_lock);
+   crypto_enqueue_request(&priv->ring[ring].queue, req);
+   spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-   priv->need_dequeue = true;
+   priv->ring[ring].need_dequeue = true;
continue;
}
 
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
 
-   spin_lock_bh(&priv->ring[ctx->ring].egress_lock);
-   list_add_tail(&request->list, &priv->ring[ctx->ring].list);
-   spin_unlock_bh(&priv->ring[ctx->ring].egress_lock);
-
-   cdesc[ctx->ring] += commands;
-   rdesc[ctx->ring] += results;
+   spin_lock_bh(&priv->ring[ring].egress_lock);
+   list_add_tail(&request->list, &priv->ring[ring].list);
+   spin_unlock_bh(&priv->ring[ring].egress_lock);
 
-   nreq[ctx->ring]++;
-   } while (n++ < EIP197_MAX_BATCH_SZ);
+   cdesc += commands;
+   rdesc += results;
+   } while (nreq++ < EIP197_MAX_BATCH_SZ);
 
 finalize:
-   if (n == EIP197_MAX_BATCH_SZ)
-   priv->need_dequeue = true;
-   else if (!n)
+   if (nreq == EIP197_MAX_BATCH_SZ)
+   priv->ring[ring].need_dequeue = true;
+   else if (!nreq)
return;
 
-   for (i = 0; i < priv->config.rings; i++) {
-   if (!nreq[i])
-   continue;
+   spin_lock_bh(&priv->ring[ring].lock);
 
-   spin_lock_bh(&priv->ring[i].lock);
+   /* Configure when we want an interrupt */
+   writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+  EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
+  priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
 
-   /* Configure when we want an interrupt */
-   writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
-  EIP197_HIA_RDR_THRESH_PROC_PKT(nreq[i]),
-  priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_THRESH);
+   /* let the RDR know we have pending descriptors */
+   writel((rdesc * priv->config.rd_offset) << 2,
+  priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
 
-   /* let the RDR know we have pending descriptors */
-   writel((rdesc[i] * priv->config.rd_offset) << 2,
-  priv->base + EIP197_HIA_RDR(i) + 
EIP197_HIA_xDR_PREP_COUNT);
+   /* let the CDR know we have pending descriptors */
+   writel((cdesc * priv->config.cd_offset) << 2,

[PATCH 09/13] crypto: inside-secure - stop requeueing failed requests

2017-06-15 Thread Antoine Tenart
This updates the dequeue function of the inside-secure safexcel driver
so that failed requests aren't requeued when they fail (for whatever
reason, which can be because the hw ring is full).

Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.c 
b/drivers/crypto/inside-secure/safexcel.c
index 8956b23803a8..8ae133a9e3f2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -439,20 +439,22 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, 
int ring)
goto finalize;
 
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-   if (!request)
-   goto requeue;
+   if (!request) {
+   spin_lock_bh(&priv->ring[ring].queue_lock);
+   crypto_enqueue_request(&priv->ring[ring].queue, req);
+   spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+   priv->ring[ring].need_dequeue = true;
+   goto finalize;
+   }
 
ctx = crypto_tfm_ctx(req->tfm);
    ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
-requeue:
-   spin_lock_bh(&priv->ring[ring].queue_lock);
-   crypto_enqueue_request(&priv->ring[ring].queue, req);
-   spin_unlock_bh(&priv->ring[ring].queue_lock);
-
+   req->complete(req, ret);
priv->ring[ring].need_dequeue = true;
-   continue;
+   goto finalize;
}
 
if (backlog)
-- 
2.9.4



[PATCH 12/13] crypto: inside-secure - increase the batch size

2017-06-15 Thread Antoine Tenart
Increase the batch size to the maximum number of requests a ring can
handle at a time (its size). This is possible now that the request
queues are per hw ring. This improves performance.

Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/inside-secure/safexcel.h 
b/drivers/crypto/inside-secure/safexcel.h
index abe0f59d1473..304c5838c11a 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -23,7 +23,7 @@
 #define EIP197_MAX_TOKENS  5
 #define EIP197_MAX_RINGS   4
 #define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ8
+#define EIP197_MAX_BATCH_SZEIP197_DEFAULT_RING_SIZE
 
 #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
 GFP_KERNEL : GFP_ATOMIC)
-- 
2.9.4



[PATCH 00/13] crypto: inside-secure - various improvements

2017-06-15 Thread Antoine Tenart
Hi Herbert,

This series improves the newly added inside-secure driver in various
ways. There are also a few non-critical fixes.

The series is based on top of your cryptodev/master branch, as it
depends on the inside-secure driver addition ("crypto: inside-secure -
add SafeXcel EIP197 crypto engine driver").

I'd like to thank Ofer Heifetz and Igal Liberman, who helped make
these changes!

Thanks,
Antoine

Antoine Tenart (10):
  crypto: inside-secure - use hmac ipad/opad constants
  crypto: inside-secure - fix the ring wr_cache offset
  crypto: inside-secure - fix incorrect DSE data cache setting
  crypto: inside-secure - update the context and request later
  crypto: inside-secure - use one queue per hw ring
  crypto: inside-secure - stop requeueing failed requests
  crypto: inside-secure - get the backlog before dequeueing the request
  crypto: inside-secure - only dequeue when needed
  crypto: inside-secure - increase the batch size
  crypto: inside-secure - use the base_end pointer in ring rollback

Igal Liberman (2):
  crypto: inside-secure - enable single WR in DSE configuration
  crypto: inside-secure - optimize DSE bufferability control

Ofer Heifetz (1):
  crypto: inside-secure - align the cipher and hash send functions

 drivers/crypto/inside-secure/safexcel.c| 104 -
 drivers/crypto/inside-secure/safexcel.h|  18 +++--
 drivers/crypto/inside-secure/safexcel_cipher.c |  47 ++-
 drivers/crypto/inside-secure/safexcel_hash.c   |  51 ++--
 drivers/crypto/inside-secure/safexcel_ring.c   |   2 +-
 5 files changed, 116 insertions(+), 106 deletions(-)

-- 
2.9.4



[PATCH 04/13] crypto: inside-secure - enable single WR in DSE configuration

2017-06-15 Thread Antoine Tenart
From: Igal Liberman 

When enable_single_wr is not enabled, the DSE will only write those
parts of a result descriptor that need updating, which means a final
result descriptor will be written in 2 or 3 smaller transfers.
When enable_single_wr is enabled the DSE will combine these 2-3
updates into one large write transfer, generally improving performance.

Signed-off-by: Igal Liberman 
Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel.c | 1 +
 drivers/crypto/inside-secure/safexcel.h | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/crypto/inside-secure/safexcel.c 
b/drivers/crypto/inside-secure/safexcel.c
index 99755fc1a161..658b307c6a11 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -329,6 +329,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv 
*priv)
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | 
EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+   val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
writel(val, priv->base + EIP197_HIA_DSE_CFG);
 
/* Leave the DSE threads reset state */
diff --git a/drivers/crypto/inside-secure/safexcel.h 
b/drivers/crypto/inside-secure/safexcel.h
index 0328a9314b90..b8a81c568c99 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -143,6 +143,7 @@
 #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n)  (((n) & 0x7) << 20)
 #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n)((n) << 24)
 #define EIP197_HIA_DFE_CFG_DIS_DEBUG   (BIT(31) | BIT(29))
+#define EIP197_HIA_DSE_CFG_EN_SINGLE_WRBIT(29)
 #define EIP197_HIA_DSE_CFG_DIS_DEBUG   BIT(31)
 
 /* EIP197_HIA_DFE/DSE_THR_CTRL */
-- 
2.9.4



[PATCH 01/13] crypto: inside-secure - use hmac ipad/opad constants

2017-06-15 Thread Antoine Tenart
Replace the hmac ipad/opad values by their defined constants.

Signed-off-by: Antoine Tenart 
---
 drivers/crypto/inside-secure/safexcel_hash.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c 
b/drivers/crypto/inside-secure/safexcel_hash.c
index 060ea034c9da..6eee1a502225 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -8,6 +8,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <crypto/hmac.h>
 #include 
 #include 
 #include 
@@ -774,8 +775,8 @@ static int safexcel_hmac_init_pad(struct ahash_request 
*areq,
memcpy(opad, ipad, blocksize);
 
for (i = 0; i < blocksize; i++) {
-   ipad[i] ^= 0x36;
-   opad[i] ^= 0x5c;
+   ipad[i] ^= HMAC_IPAD_VALUE;
+   opad[i] ^= HMAC_OPAD_VALUE;
}
 
return 0;
-- 
2.9.4



pkcs1pad and RSA e

2017-06-15 Thread Stephan Müller
Hi,

during the preparation of the self-test patch for pkcs1pad, I noticed the 
following strange behavior:

I set an RSA private key with e=0x10001 to generate a signature. This 
generation process was successful and the expected signature was generated. 
But when I used the very same TFM with the already-set private key to perform 
a signature verification on the previously generated signature, it failed.

After some experiments, I found that I had to set e=0x000 ... 10001 where 
the size of e is equal to the size of n. Still, signature generation passed. 
And now, using the same TFM with the set private key, the signature 
verification passed too.
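
In other words, the workaround was to left-pad e with zeros up to the
byte length of n before handing the key to the kernel. A plain-C
illustration of that padding (helper name hypothetical):

#include <string.h>

static void pad_exponent_to_modulus(const unsigned char *e, size_t elen,
				    unsigned char *out, size_t nlen)
{
	/* Zero-extend e so that it occupies as many bytes as n. */
	memset(out, 0, nlen - elen);
	memcpy(out + (nlen - elen), e, elen);
}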

How come that there is a dissimilar handling of e regarding signature 
generation and verification?

Ciao
Stephan


[PATCH 0/9] Bug fixes and ctr mode of operation

2017-06-15 Thread Harsh Jain
This series is based on the cryptodev-2.6 tree and includes bug fixes and the
ctr(aes) and rfc3686(ctr(aes)) algorithms.

Harsh Jain (7):
  crypto: chcr - Pass lcb bit setting to firmware
  crypto: chcr - Set fallback key
  crypto: chcr - Return correct error code
  crypto: chcr - Avoid changing request structure
  crypto:chcr - Add ctr mode and process large sg entries for cipher
  MAINTAINERS:Add maintainer for chelsio crypto driver
  crypto: chcr - Ensure Destination sg entry size less than 2k
Atul Gupta (2):
  chcr - Add debug counters
  crypto: chcr - Select device in Round Robin fashion

 MAINTAINERS|7 +
 drivers/crypto/chelsio/chcr_algo.c | 1096 
 drivers/crypto/chelsio/chcr_algo.h |   30 +-
 drivers/crypto/chelsio/chcr_core.c |   56 +-
 drivers/crypto/chelsio/chcr_core.h |5 +-
 drivers/crypto/chelsio/chcr_crypto.h   |   25 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h |1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c |   35 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c |1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h |   10 +
 10 files changed, 1020 insertions(+), 246 deletions(-)

-- 
1.8.3.1



[PATCH 2/9] crypto: chcr - Fix fallback key setting

2017-06-15 Thread Harsh Jain
Set key of fallback tfm for rfc4309.

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c 
b/drivers/crypto/chelsio/chcr_algo.c
index e8ff505..14641c6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2210,7 +2210,8 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead 
*aead, const u8 *key,
unsigned int keylen)
 {
struct chcr_context *ctx = crypto_aead_ctx(aead);
-struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+   struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+   int error;
 
if (keylen < 3) {
crypto_tfm_set_flags((struct crypto_tfm *)aead,
@@ -2218,6 +2219,15 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead 
*aead, const u8 *key,
aeadctx->enckey_len = 0;
return  -EINVAL;
}
+   crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+   crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+   error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+   crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+   crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK);
+   if (error)
+   return error;
keylen -= 3;
memcpy(aeadctx->salt, key + keylen, 3);
return chcr_ccm_common_setkey(aead, key, keylen);
-- 
1.8.3.1



[PATCH 3/9] crypto: chcr - Return correct error code

2017-06-15 Thread Harsh Jain
Return correct error instead of EINVAL.

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c | 76 +-
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c 
b/drivers/crypto/chelsio/chcr_algo.c
index 14641c6..156065d 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1399,7 +1399,7 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
unsigned short stop_offset = 0;
unsigned int  assoclen = req->assoclen;
unsigned int  authsize = crypto_aead_authsize(tfm);
-   int err = -EINVAL, src_nent;
+   int error = -EINVAL, src_nent;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1416,9 +1416,9 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
reqctx->dst = src;
 
if (req->src != req->dst) {
-   err = chcr_copy_assoc(req, aeadctx);
-   if (err)
-   return ERR_PTR(err);
+   error = chcr_copy_assoc(req, aeadctx);
+   if (error)
+   return ERR_PTR(error);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
   req->assoclen);
}
@@ -1430,6 +1430,7 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
 (op_type ? -authsize : authsize));
if (reqctx->dst_nents < 0) {
pr_err("AUTHENC:Invalid Destination sg entries\n");
+   error = -EINVAL;
goto err;
}
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
@@ -1443,8 +1444,10 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
-   if (!skb)
+   if (!skb) {
+   error = -ENOMEM;
goto err;
+   }
 
/* LLD is going to write the sge hdr. */
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
@@ -1496,9 +1499,9 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
sg_param.nents = reqctx->dst_nents;
sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
sg_param.qid = qid;
-   sg_param.align = 0;
-   if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
-            &sg_param))
+   error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+           reqctx->dst, &sg_param);
+   if (error)
goto dstmap_fail;
 
skb_set_transport_header(skb, transhdr_len);
@@ -1520,7 +1523,7 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
/* ivmap_fail: */
kfree_skb(skb);
 err:
-   return ERR_PTR(-EINVAL);
+   return ERR_PTR(error);
 }
 
 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1730,7 +1733,7 @@ static struct sk_buff *create_aead_ccm_wr(struct 
aead_request *req,
unsigned int dst_size = 0, kctx_len;
unsigned int sub_type;
unsigned int authsize = crypto_aead_authsize(tfm);
-   int err = -EINVAL, src_nent;
+   int error = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
 
@@ -1746,10 +1749,10 @@ static struct sk_buff *create_aead_ccm_wr(struct 
aead_request *req,
reqctx->dst = src;
 
if (req->src != req->dst) {
-   err = chcr_copy_assoc(req, aeadctx);
-   if (err) {
+   error = chcr_copy_assoc(req, aeadctx);
+   if (error) {
pr_err("AAD copy to destination buffer fails\n");
-   return ERR_PTR(err);
+   return ERR_PTR(error);
}
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
   req->assoclen);
@@ -1758,11 +1761,11 @@ static struct sk_buff *create_aead_ccm_wr(struct 
aead_request *req,
 (op_type ? -authsize : authsize));
if (reqctx->dst_nents < 0) {
pr_err("CCM:Invalid Destination sg entries\n");
+   error = -EINVAL;
goto err;
}
-
-
-   if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+   error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
+   if (error)
goto err;
 
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
@@ -1777,8 +1780,10 @@ static struct sk_buff *create_aead_ccm_wr(struct 
aead_request *req,
 
skb = alloc_skb((transhdr_len + 

[PATCH 6/9] chcr - Add debug counters

2017-06-15 Thread Harsh Jain
Count the types of operations done by HW.

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c | 16 +-
 drivers/crypto/chelsio/chcr_core.c |  2 ++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h |  1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 35 ++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h |  9 ++
 5 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c 
b/drivers/crypto/chelsio/chcr_algo.c
index 03b817f..2f388af 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -154,6 +154,7 @@ int chcr_handle_resp(struct crypto_async_request *req, 
unsigned char *input,
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_req_ctx ctx_req;
unsigned int digestsize, updated_digestsize;
+   struct adapter *adap = padap(ctx->dev);
 
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
@@ -207,6 +208,7 @@ int chcr_handle_resp(struct crypto_async_request *req, 
unsigned char *input,
ctx_req.req.ahash_req->base.complete(req, err);
break;
}
+   atomic_inc(&adap->chcr_stats.complete);
return err;
 }
 
@@ -639,6 +641,7 @@ static struct sk_buff *create_cipher_wr(struct 
cipher_wr_param *wrparam)
unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
+   struct adapter *adap = padap(ctx->dev);
 
phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
 
@@ -701,6 +704,7 @@ static struct sk_buff *create_cipher_wr(struct 
cipher_wr_param *wrparam)
skb_set_transport_header(skb, transhdr_len);
    write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
    write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
+   atomic_inc(&adap->chcr_stats.cipher_rqst);
create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
@@ -1337,6 +1341,7 @@ static struct sk_buff *create_hash_wr(struct 
ahash_request *req,
u8 hash_size_in_response = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
+   struct adapter *adap = padap(ctx->dev);
 
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1393,7 +1398,7 @@ static struct sk_buff *create_hash_wr(struct 
ahash_request *req,
param->bfr_len);
    if (param->sg_len != 0)
        write_sg_to_skb(skb, &frags, req->src, param->sg_len);
-
+   atomic_inc(&adap->chcr_stats.digest_rqst);
    create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
hash_size_in_response, 0, DUMMY_BYTES, 0);
req_ctx->skb = skb;
@@ -1873,6 +1878,7 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
+   struct adapter *adap = padap(ctx->dev);
 
if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
goto err;
@@ -1911,6 +1917,7 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
T6_MAX_AAD_SIZE,
transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
op_type)) {
+   atomic_inc(&adap->chcr_stats.fallback);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -1983,6 +1990,7 @@ static struct sk_buff *create_authenc_wr(struct 
aead_request *req,
}
    write_buffer_to_skb(skb, &frags, req->iv, ivsize);
    write_sg_to_skb(skb, &frags, src, req->cryptlen);
+   atomic_inc(&adap->chcr_stats.cipher_rqst);
    create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
reqctx->skb = skb;
@@ -2206,6 +2214,7 @@ static struct sk_buff *create_aead_ccm_wr(struct 
aead_request *req,
int error = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
+   struct adapter *adap = padap(ctx->dev);
 
 
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
@@ -2245,6 +2254,7 @@ static struct sk_buff *create_aead_ccm_wr(struct 
aead_request *req,
T6_MAX_AAD_SIZE - 18,
transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
op_type)) {
+   atomic_inc(&adap->chcr_stats.fallback);

[PATCH 7/9] MAINTAINERS:Add maintainer for chelsio crypto driver

2017-06-15 Thread Harsh Jain
Add myself as maintainer for chcr.

Signed-off-by: Harsh Jain 
---
 MAINTAINERS | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 1f20176..504dc65 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3706,6 +3706,13 @@ S:   Supported
 F: drivers/infiniband/hw/cxgb4/
 F: include/uapi/rdma/cxgb4-abi.h
 
+CXGB4 CRYPTO DRIVER (chcr)
+M: Harsh Jain 
+L: linux-crypto@vger.kernel.org
+W: http://www.chelsio.com
+S: Supported
+F: drivers/crypto/chelsio
+
 CXGB4VF ETHERNET DRIVER (CXGB4VF)
 M: Casey Leedom 
 L: net...@vger.kernel.org
-- 
1.8.3.1



[PATCH 5/9] crypto:chcr - Add ctr mode and process large sg entries for cipher

2017-06-15 Thread Harsh Jain
It sends multiple WRs to H/W to handle large sg lists. Adds ctr(aes)
and rfc3686(ctr(aes)) modes.

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c   | 786 ---
 drivers/crypto/chelsio/chcr_algo.h   |  26 +-
 drivers/crypto/chelsio/chcr_core.c   |   1 -
 drivers/crypto/chelsio/chcr_core.h   |   3 +
 drivers/crypto/chelsio/chcr_crypto.h |  19 +-
 5 files changed, 690 insertions(+), 145 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c 
b/drivers/crypto/chelsio/chcr_algo.c
index 9c839c6..03b817f 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -55,6 +55,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -151,12 +153,11 @@ int chcr_handle_resp(struct crypto_async_request *req, 
unsigned char *input,
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_req_ctx ctx_req;
-   struct cpl_fw6_pld *fw6_pld;
unsigned int digestsize, updated_digestsize;
 
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
-   ctx_req.req.aead_req = (struct aead_request *)req;
+   ctx_req.req.aead_req = aead_request_cast(req);
ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
@@ -169,27 +170,16 @@ int chcr_handle_resp(struct crypto_async_request *req, 
unsigned char *input,
);
ctx_req.ctx.reqctx->verify = VERIFY_HW;
}
+   ctx_req.req.aead_req->base.complete(req, err);
break;
 
case CRYPTO_ALG_TYPE_ABLKCIPHER:
-   ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
-   ctx_req.ctx.ablk_ctx =
-   ablkcipher_request_ctx(ctx_req.req.ablk_req);
-   if (!err) {
-   fw6_pld = (struct cpl_fw6_pld *)input;
-   memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
-  AES_BLOCK_SIZE);
-   }
-   dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
-ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
-   if (ctx_req.ctx.ablk_ctx->skb) {
-   kfree_skb(ctx_req.ctx.ablk_ctx->skb);
-   ctx_req.ctx.ablk_ctx->skb = NULL;
-   }
+err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+  input, err);
break;
 
case CRYPTO_ALG_TYPE_AHASH:
-   ctx_req.req.ahash_req = (struct ahash_request *)req;
+   ctx_req.req.ahash_req = ahash_request_cast(req);
ctx_req.ctx.ahash_ctx =
ahash_request_ctx(ctx_req.req.ahash_req);
digestsize =
@@ -214,6 +204,7 @@ int chcr_handle_resp(struct crypto_async_request *req, 
unsigned char *input,
   sizeof(struct cpl_fw6_pld),
   updated_digestsize);
}
+   ctx_req.req.ahash_req->base.complete(req, err);
break;
}
return err;
@@ -392,7 +383,7 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl 
*phys_cpl,
   struct phys_sge_parm *sg_param)
 {
struct phys_sge_pairs *to;
-   int out_buf_size = sg_param->obsize;
+   unsigned int len = 0, left_size = sg_param->obsize;
unsigned int nents = sg_param->nents, i, j = 0;
 
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
@@ -409,20 +400,15 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
phys_cpl->rss_hdr_int.hash_val = 0;
to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
   sizeof(struct cpl_rx_phys_dsgl));
-
-   for (i = 0; nents; to++) {
-   for (j = 0; j < 8 && nents; j++, nents--) {
-   out_buf_size -= sg_dma_len(sg);
-   to->len[j] = htons(sg_dma_len(sg));
+   for (i = 0; nents && left_size; to++) {
+   for (j = 0; j < 8 && nents && left_size; j++, nents--) {
+   len = min(left_size, sg_dma_len(sg));
+   to->len[j] = htons(len);
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
+   left_size -= len;
sg = sg_next(sg);
}
}
-   if (out_buf_size) {
-   j--;
-   to--;
-   to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
-   }
 }
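
The rewritten loop above clamps each DSGL entry to the bytes still owed
to the hardware instead of patching the last entry after the walk. A toy
model of the clamping rule (standalone, with made-up segment sizes):

#include <stdio.h>

/* Each entry is min(remaining, segment length), so the described
 * total can never exceed the expected output size. */
int main(void)
{
        unsigned int segs[] = { 64, 64, 64 };   /* DMA segment lengths */
        unsigned int left = 100;                /* bytes the hardware expects */

        for (unsigned int i = 0; i < 3 && left; i++) {
                unsigned int len = segs[i] < left ? segs[i] : left;

                printf("entry %u: %u bytes\n", i, len); /* 64, then 36 */
                left -= len;
        }
        return 0;
}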
 
 static inline int 

[PATCH 9/9] crypto: chcr - Select device in Round Robin fashion

2017-06-15 Thread Harsh Jain
When multiple devices are present in the system, select the device
for crypto operations in a round-robin fashion.
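
The selection is a cursor walking the device list circularly under a
mutex; a session keeps the device it was first assigned, so per-session
request-response ordering is unaffected. A minimal user-space sketch of
the same pattern (names are illustrative, not the driver's):

#include <pthread.h>
#include <stddef.h>

struct dev_node {
        struct dev_node *next;          /* circular list of devices */
        /* ... device state ... */
};

static struct dev_node *rr_cursor;      /* next device to hand out */
static pthread_mutex_t rr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hand out the current device and advance the cursor; called once per
 * session, after which the session sticks to its device. */
static struct dev_node *rr_pick(void)
{
        struct dev_node *d = NULL;

        pthread_mutex_lock(&rr_lock);
        if (rr_cursor) {
                d = rr_cursor;
                rr_cursor = rr_cursor->next;    /* wraps around */
        }
        pthread_mutex_unlock(&rr_lock);
        return d;
}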

Signed-off-by: Atul Gupta 
Reviewed-by: Ganesh Goudar 
---
 drivers/crypto/chelsio/chcr_algo.c |  8 ++--
 drivers/crypto/chelsio/chcr_core.c | 53 ++
 drivers/crypto/chelsio/chcr_core.h |  2 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c |  1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h |  1 +
 5 files changed, 44 insertions(+), 21 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 9a84ffa..aa4e5b8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1216,7 +1216,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 
 static int chcr_device_init(struct chcr_context *ctx)
 {
-   struct uld_ctx *u_ctx;
+   struct uld_ctx *u_ctx = NULL;
struct adapter *adap;
unsigned int id;
int txq_perchan, txq_idx, ntxq;
@@ -1224,12 +1224,12 @@ static int chcr_device_init(struct chcr_context *ctx)
 
id = smp_processor_id();
if (!ctx->dev) {
-   err = assign_chcr_device(&ctx->dev);
-   if (err) {
+   u_ctx = assign_chcr_device();
+   if (!u_ctx) {
pr_err("chcr device assignment fails\n");
goto out;
}
-   u_ctx = ULD_CTX(ctx);
+   ctx->dev = u_ctx->dev;
adap = padap(ctx->dev);
ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
adap->vres.ncrypto_fc);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 5ae659a..b6dd9cb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -29,6 +29,7 @@
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 static atomic_t dev_count;
+static struct uld_ctx *ctx_rr;
 
 typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
 static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -49,25 +50,28 @@
.rx_handler = chcr_uld_rx_handler,
 };
 
-int assign_chcr_device(struct chcr_dev **dev)
+struct uld_ctx *assign_chcr_device(void)
 {
-   struct uld_ctx *u_ctx;
-   int ret = -ENXIO;
+   struct uld_ctx *u_ctx = NULL;
 
/*
-* Which device to use if multiple devices are available TODO
-* May be select the device based on round robin. One session
-* must go to the same device to maintain the ordering.
+* When multiple devices are present in the system, select
+* the device for crypto operations in round-robin fashion,
+* although one session must use the same device to
+* maintain request-response ordering.
 */
-   mutex_lock(&dev_mutex); /* TODO ? */
-   list_for_each_entry(u_ctx, &uld_ctx_list, entry)
-   if (u_ctx->dev) {
-   *dev = u_ctx->dev;
-   ret = 0;
-   break;
+   mutex_lock(&dev_mutex);
+   if (!list_empty(&uld_ctx_list)) {
+   u_ctx = ctx_rr;
+   if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+   ctx_rr = list_first_entry(&uld_ctx_list,
+ struct uld_ctx,
+ entry);
+   else
+   ctx_rr = list_next_entry(ctx_rr, entry);
}
mutex_unlock(&dev_mutex);
-   return ret;
+   return u_ctx;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -82,11 +86,27 @@ static int chcr_dev_add(struct uld_ctx *u_ctx)
u_ctx->dev = dev;
dev->u_ctx = u_ctx;
atomic_inc(&dev_count);
+   mutex_lock(&dev_mutex);
+   list_add_tail(&u_ctx->entry, &uld_ctx_list);
+   if (!ctx_rr)
+   ctx_rr = u_ctx;
+   mutex_unlock(&dev_mutex);
return 0;
 }
 
 static int chcr_dev_remove(struct uld_ctx *u_ctx)
 {
+   if (ctx_rr == u_ctx) {
+   if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+   ctx_rr = list_first_entry(&uld_ctx_list,
+ struct uld_ctx,
+ entry);
+   else
+   ctx_rr = list_next_entry(ctx_rr, entry);
+   }
+   list_del(&u_ctx->entry);
+   if (list_empty(&uld_ctx_list))
+   ctx_rr = NULL;
kfree(u_ctx->dev);
u_ctx->dev = NULL;
atomic_dec(&dev_count);
@@ -139,10 +159,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
+   if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
+   u_ctx = ERR_PTR(-ENOMEM);
+   goto out;
+   }
u_ctx->lldi = *lld;
-  

[PATCH 8/9] crypto: chcr - Ensure Destination sg entry size less than 2k

2017-06-15 Thread Harsh Jain
Allocate a new sg list when the received destination sg list has an
entry greater than 2k.
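
Each oversized entry is cut into pieces no larger than the per-entry
limit, so the replacement list size is a ceiling division per entry:
one 5000-byte entry with a 2048-byte limit becomes
DIV_ROUND_UP(5000, 2048) = 3 entries (2048 + 2048 + 904). A standalone
sketch of the count calculation (the 2048-byte cap is assumed from the
"2k" in the subject):

#include <stdio.h>

#define SG_LIMIT 2048   /* assumed per-entry cap */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int lens[] = { 5000, 1024, 4096 };     /* sg entry sizes */
        unsigned int nents = 0;

        for (unsigned int i = 0; i < 3; i++)
                nents += DIV_ROUND_UP(lens[i], SG_LIMIT);
        printf("replacement entries: %u\n", nents);     /* 3 + 1 + 2 = 6 */
        return 0;
}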

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c   | 153 +++
 drivers/crypto/chelsio/chcr_crypto.h |   6 ++
 2 files changed, 142 insertions(+), 17 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 2f388af..9a84ffa 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -166,6 +166,8 @@ int chcr_handle_resp(struct crypto_async_request *req, 
unsigned char *input,
kfree_skb(ctx_req.ctx.reqctx->skb);
ctx_req.ctx.reqctx->skb = NULL;
}
+   free_new_sg(ctx_req.ctx.reqctx->newdstsg);
+   ctx_req.ctx.reqctx->newdstsg = NULL;
if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
chcr_verify_tag(ctx_req.req.aead_req, input,
);
@@ -1068,6 +1070,8 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
chcr_send_wr(skb);
return 0;
 complete:
+   free_new_sg(reqctx->newdstsg);
+   reqctx->newdstsg = NULL;
req->base.complete(&req->base, err);
return err;
 }
@@ -1083,7 +1087,7 @@ static int process_cipher(struct ablkcipher_request *req,
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct  cipher_wr_param wrparam;
-   int bytes, err = -EINVAL;
+   int bytes, nents, err = -EINVAL;
 
reqctx->newdstsg = NULL;
reqctx->processed = 0;
@@ -1097,7 +1101,14 @@ static int process_cipher(struct ablkcipher_request *req,
goto error;
}
wrparam.srcsg = req->src;
-   reqctx->dstsg = req->dst;
+   if (is_newsg(req->dst, &nents)) {
+   reqctx->newdstsg = alloc_new_sg(req->dst, nents);
+   if (IS_ERR(reqctx->newdstsg))
+   return PTR_ERR(reqctx->newdstsg);
+   reqctx->dstsg = reqctx->newdstsg;
+   } else {
+   reqctx->dstsg = req->dst;
+   }
bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
 SPACE_LEFT(ablkctx->enckey_len),
 ,
@@ -1150,6 +1161,8 @@ static int process_cipher(struct ablkcipher_request *req,
 
return 0;
 error:
+   free_new_sg(reqctx->newdstsg);
+   reqctx->newdstsg = NULL;
return err;
 }
 
@@ -1808,6 +1821,63 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
}
 }
 
+static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
+{
+   int nents = 0;
+   int ret = 0;
+
+   while (sgl) {
+   if (sgl->length > CHCR_SG_SIZE)
+   ret = 1;
+   nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
+   sgl = sg_next(sgl);
+   }
+   *newents = nents;
+   return ret;
+}
+
+static inline void free_new_sg(struct scatterlist *sgl)
+{
+   kfree(sgl);
+}
+
+static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
+  unsigned int nents)
+{
+   struct scatterlist *newsg, *sg;
+   int i, len, processed = 0;
+   struct page *spage;
+   int offset;
+
+   newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
+   if (!newsg)
+   return ERR_PTR(-ENOMEM);
+   sg = newsg;
+   sg_init_table(sg, nents);
+   offset = sgl->offset;
+   spage = sg_page(sgl);
+   for (i = 0; i < nents; i++) {
+   len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
+   sg_set_page(sg, spage, len, offset);
+   processed += len;
+   offset += len;
+   if (offset >= PAGE_SIZE) {
+   offset = offset % PAGE_SIZE;
+   spage++;
+   }
+   if (processed == sgl->length) {
+   processed = 0;
+   sgl = sg_next(sgl);
+   if (!sgl)
+   break;
+   spage = sg_page(sgl);
+   offset = sgl->offset;
+   }
+   sg = sg_next(sg);
+   }
+   return newsg;
+}
+
 static int chcr_copy_assoc(struct aead_request *req,
struct chcr_aead_ctx *ctx)
 {
@@ -1870,7 +1940,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct scatterlist *src;
unsigned int frags = 0, transhdr_len;
unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
-   unsigned int   kctx_len = 0;
+   unsigned int   kctx_len = 0, nents;
unsigned short stop_offset = 0;
unsigned int  assoclen = req->assoclen;
unsigned int  authsize = crypto_aead_authsize(tfm);

[PATCH 4/9] crypto: chcr - Avoid changing request structure

2017-06-15 Thread Harsh Jain
Do not update assoclen received in aead_request.
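
Mutating the caller's request is unsafe: the caller may inspect or
resubmit it, and the RFC4106 path was subtracting the 8-byte IV portion
from req->assoclen on every pass. The fix is the usual local-copy
pattern; a generic sketch (the struct below is a stand-in for the
kernel's struct aead_request):

#include <stdbool.h>

struct aead_request_stub {      /* stand-in for struct aead_request */
        unsigned int assoclen;
        /* ... */
};

static int build_wr(const struct aead_request_stub *req, bool is_rfc4106)
{
        unsigned int assoclen = req->assoclen;  /* local working copy */

        if (is_rfc4106)
                assoclen -= 8;  /* strip the IV part of the AAD locally */

        /* ... build the work request from 'assoclen'; req->assoclen is
         * left exactly as the caller provided it ... */
        return 0;
}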

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c | 37 ++---
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 156065d..9c839c6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -126,13 +126,13 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
fw6_pld = (struct cpl_fw6_pld *)input;
if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
(get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
-   cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+   cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
} else {
 
sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
authsize, req->assoclen +
req->cryptlen - authsize);
-   cmp = memcmp(temp, (fw6_pld + 1), authsize);
+   cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
}
if (cmp)
*err = -EBADMSG;
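
Note the memcmp() -> crypto_memneq() change above: memcmp() may return
as soon as a byte differs, so comparison time can leak how much of the
authentication tag matched, whereas crypto_memneq() examines every byte
before deciding. The idea in a standalone sketch:

#include <stddef.h>

/* Constant-time inequality: OR the differences across the whole
 * buffer so runtime does not depend on the first mismatch. */
static int ct_memneq(const void *a, const void *b, size_t n)
{
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;

        while (n--)
                diff |= *pa++ ^ *pb++;
        return diff != 0;       /* nonzero iff the buffers differ */
}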
@@ -1840,9 +1840,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct scatterlist *src;
unsigned int frags = 0, transhdr_len;
unsigned int ivsize = AES_BLOCK_SIZE;
-   unsigned int dst_size = 0, kctx_len;
+   unsigned int dst_size = 0, kctx_len, assoclen = req->assoclen;
unsigned char tag_offset = 0;
-   unsigned int crypt_len = 0;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
@@ -1854,27 +1853,21 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
-   src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+   src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
 
-   src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+   src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
reqctx->dst = src;
if (req->src != req->dst) {
error = chcr_copy_assoc(req, aeadctx);
if (error)
return  ERR_PTR(error);
reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
-  req->assoclen);
+  assoclen);
}
 
-   if (!req->cryptlen)
-   /* null-payload is not supported in the hardware.
-* software is sending block size
-*/
-   crypt_len = AES_BLOCK_SIZE;
-   else
-   crypt_len = req->cryptlen;
+
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 (op_type ? -authsize : authsize));
if (reqctx->dst_nents < 0) {
@@ -1907,19 +1900,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
memset(chcr_req, 0, transhdr_len);
 
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
-   req->assoclen -= 8;
+   assoclen = req->assoclen - 8;
 
tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
ctx->dev->rx_channel_id, 2, (ivsize ?
-   (req->assoclen + 1) : 0));
+   (assoclen + 1) : 0));
chcr_req->sec_cpl.pldlen =
-   htonl(req->assoclen + ivsize + req->cryptlen);
+   htonl(assoclen + ivsize + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
-   req->assoclen ? 1 : 0, req->assoclen,
-   req->assoclen + ivsize + 1, 0);
+   assoclen ? 1 : 0, assoclen,
+   assoclen + ivsize + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
-   FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+   FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
tag_offset, tag_offset);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
@@ -1955,9 +1948,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
goto dstmap_fail;

skb_set_transport_header(skb, transhdr_len);
-
write_sg_to_skb(skb, &frags, 

[PATCH 1/9] crypto: chcr - Pass lcb bit setting to firmware

2017-06-15 Thread Harsh Jain
GCM and CBC modes of operation require the Last Cipher Block.
This patch sets the lcb bit in the WR header when required.
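
In CBC the IV of the next request is the last cipher block of the
previous one, which is why the hardware is asked, via the lcb bit in
the WR header, to return that block. The chaining rule in a standalone
sketch:

#include <string.h>

#define AES_BLOCK_SIZE 16

/* CBC chaining across requests: the final ciphertext block of this
 * request becomes the IV for the next request on the same context. */
static void cbc_save_next_iv(unsigned char iv[AES_BLOCK_SIZE],
                             const unsigned char *ct, size_t ct_len)
{
        if (ct_len >= AES_BLOCK_SIZE)
                memcpy(iv, ct + ct_len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}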

Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chcr_algo.c | 18 +++---
 drivers/crypto/chelsio/chcr_algo.h |  4 ++--
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f00e0d8..e8ff505 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -518,7 +518,8 @@ static inline void create_wreq(struct chcr_context *ctx,
   void *req, struct sk_buff *skb,
   int kctx_len, int hash_sz,
   int is_iv,
-  unsigned int sc_len)
+  unsigned int sc_len,
+  unsigned int lcb)
 {
struct uld_ctx *u_ctx = ULD_CTX(ctx);
int iv_loc = IV_DSGL;
@@ -543,7 +544,8 @@ static inline void create_wreq(struct chcr_context *ctx,
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
-   is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
+   is_iv ? iv_loc : IV_NOP, !!lcb,
+   ctx->tx_qidx);
 
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
   qid);
@@ -652,7 +654,8 @@ static inline void create_wreq(struct chcr_context *ctx,
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
write_sg_to_skb(skb, &frags, req->src, req->nbytes);
create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
-   sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+   sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
+   ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
skb_get(skb);
return skb;
@@ -923,7 +926,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
write_sg_to_skb(skb, &frags, req->src, param->sg_len);
 
create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
-   DUMMY_BYTES);
+   DUMMY_BYTES, 0);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -1508,7 +1511,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
write_buffer_to_skb(skb, &frags, req->iv, ivsize);
write_sg_to_skb(skb, &frags, src, req->cryptlen);
create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
-  sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+  sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
reqctx->skb = skb;
skb_get(skb);
 
@@ -1804,7 +1807,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
skb_set_transport_header(skb, transhdr_len);
frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
-   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
reqctx->skb = skb;
skb_get(skb);
return skb;
@@ -1950,7 +1953,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
write_sg_to_skb(skb, &frags, src, req->cryptlen);
create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
-   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+   sizeof(struct cpl_rx_phys_dsgl) + dst_size,
+   reqctx->verify);
reqctx->skb = skb;
skb_get(skb);
return skb;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 751d06a..9894c7b 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -185,11 +185,11 @@
FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
 
-#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, fid) \
+#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
-   FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \
+   FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
 
-- 
1.8.3.1