[PATCH] crypto: Mark MORUS SIMD glue as x86-specific

2018-05-18 Thread Ondrej Mosnáček
From: Ondrej Mosnacek 

Commit 56e8e57fc3a7 ("crypto: morus - Add common SIMD glue code for
MORUS") accidentally considered the glue code to be usable by different
architectures, but it seems to be only usable on x86.

This patch moves it under arch/x86/crypto and adds 'depends on X86' to
the Kconfig options.

Reported-by: kbuild test robot 
Signed-off-by: Ondrej Mosnacek 
---
 arch/x86/crypto/Makefile | 3 +++
 {crypto => arch/x86/crypto}/morus1280_glue.c | 4 ++--
 {crypto => arch/x86/crypto}/morus640_glue.c  | 4 ++--
 crypto/Kconfig   | 6 --
 crypto/Makefile  | 2 --
 5 files changed, 11 insertions(+), 8 deletions(-)
 rename {crypto => arch/x86/crypto}/morus1280_glue.c (98%)
 rename {crypto => arch/x86/crypto}/morus640_glue.c (98%)

diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 3813e7cdaada..48e731d782e9 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -42,6 +42,9 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
 obj-$(CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2) += aegis128l-aesni.o
 obj-$(CONFIG_CRYPTO_AEGIS256_AESNI_SSE2) += aegis256-aesni.o
 
+obj-$(CONFIG_CRYPTO_MORUS640_GLUE) += morus640_glue.o
+obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o
+
 obj-$(CONFIG_CRYPTO_MORUS640_SSE2) += morus640-sse2.o
 obj-$(CONFIG_CRYPTO_MORUS1280_SSE2) += morus1280-sse2.o
 
diff --git a/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
similarity index 98%
rename from crypto/morus1280_glue.c
rename to arch/x86/crypto/morus1280_glue.c
index ce1e5c34b09d..0dccdda1eb3a 100644
--- a/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -1,6 +1,6 @@
 /*
  * The MORUS-1280 Authenticated-Encryption Algorithm
- *   Common glue skeleton
+ *   Common x86 SIMD glue skeleton
  *
  * Copyright (c) 2016-2018 Ondrej Mosnacek 
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
@@ -299,4 +299,4 @@ EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ondrej Mosnacek ");
-MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for optimizations");
+MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
diff --git a/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
similarity index 98%
rename from crypto/morus640_glue.c
rename to arch/x86/crypto/morus640_glue.c
index c7e788cfaa29..7b58fe4d9bd1 100644
--- a/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -1,6 +1,6 @@
 /*
  * The MORUS-640 Authenticated-Encryption Algorithm
- *   Common glue skeleton
+ *   Common x86 SIMD glue skeleton
  *
  * Copyright (c) 2016-2018 Ondrej Mosnacek 
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
@@ -295,4 +295,4 @@ EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ondrej Mosnacek ");
-MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for optimizations");
+MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 75f5efde9aa3..0c9883d60a51 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -341,7 +341,8 @@ config CRYPTO_MORUS640
  Support for the MORUS-640 dedicated AEAD algorithm.
 
 config CRYPTO_MORUS640_GLUE
-   tristate "MORUS-640 AEAD algorithm (glue for SIMD optimizations)"
+   tristate "MORUS-640 AEAD algorithm (glue for x86 SIMD optimizations)"
+   depends on X86
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
@@ -363,7 +364,8 @@ config CRYPTO_MORUS1280
  Support for the MORUS-1280 dedicated AEAD algorithm.
 
 config CRYPTO_MORUS1280_GLUE
-   tristate "MORUS-1280 AEAD algorithm (glue for SIMD optimizations)"
+   tristate "MORUS-1280 AEAD algorithm (glue for x86 SIMD optimizations)"
+   depends on X86
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
diff --git a/crypto/Makefile b/crypto/Makefile
index 68a7c546460a..6d1d40eeb964 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -91,8 +91,6 @@ obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o
 obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o
 obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
 obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
-obj-$(CONFIG_CRYPTO_MORUS640_GLUE) += morus640_glue.o
-obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o
 obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
 obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
 obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
-- 
2.17.0



[cryptodev:master 68/69] crypto/morus640_glue.c:147:2: error: too few arguments to function 'kernel_fpu_begin'

2018-05-18 Thread kbuild test robot
tree:   
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master
head:   6ecc9d9ff91ff26769e58164b6216c6189cb8302
commit: 56e8e57fc3a707bf4f23f88c4822e6cbc9a950dc [68/69] crypto: morus - Add 
common SIMD glue code for MORUS
config: s390-allmodconfig (attached as .config)
compiler: s390x-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
chmod +x ~/bin/make.cross
git checkout 56e8e57fc3a707bf4f23f88c4822e6cbc9a950dc
# save the attached .config to linux build tree
make.cross ARCH=s390 

All errors (new ones prefixed by >>):

   crypto/morus640_glue.c: In function 'crypto_morus640_glue_crypt':
>> crypto/morus640_glue.c:147:2: error: too few arguments to function 
>> 'kernel_fpu_begin'
 kernel_fpu_begin();
 ^~~~
   In file included from crypto/morus640_glue.c:24:0:
   arch/s390/include/asm/fpu/api.h:94:20: note: declared here
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
   ^~~~
>> crypto/morus640_glue.c:154:2: error: too few arguments to function 
>> 'kernel_fpu_end'
 kernel_fpu_end();
 ^~
   In file included from crypto/morus640_glue.c:24:0:
   arch/s390/include/asm/fpu/api.h:107:20: note: declared here
static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
   ^~
   crypto/morus640_glue.c: In function 'cryptd_morus640_glue_encrypt':
>> crypto/morus640_glue.c:239:6: error: implicit declaration of function 
>> 'irq_fpu_usable'; did you mean '__cpu_disable'? 
>> [-Werror=implicit-function-declaration]
 if (irq_fpu_usable() && (!in_atomic() ||
 ^~
 __cpu_disable
   cc1: some warnings being treated as errors
--
   crypto/morus1280_glue.c: In function 'crypto_morus1280_glue_crypt':
>> crypto/morus1280_glue.c:151:2: error: too few arguments to function 
>> 'kernel_fpu_begin'
 kernel_fpu_begin();
 ^~~~
   In file included from crypto/morus1280_glue.c:24:0:
   arch/s390/include/asm/fpu/api.h:94:20: note: declared here
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
   ^~~~
>> crypto/morus1280_glue.c:158:2: error: too few arguments to function 
>> 'kernel_fpu_end'
 kernel_fpu_end();
 ^~
   In file included from crypto/morus1280_glue.c:24:0:
   arch/s390/include/asm/fpu/api.h:107:20: note: declared here
static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
   ^~
   crypto/morus1280_glue.c: In function 'cryptd_morus1280_glue_encrypt':
>> crypto/morus1280_glue.c:243:6: error: implicit declaration of function 
>> 'irq_fpu_usable'; did you mean '__cpu_disable'? 
>> [-Werror=implicit-function-declaration]
 if (irq_fpu_usable() && (!in_atomic() ||
 ^~
 __cpu_disable
   cc1: some warnings being treated as errors

vim +/kernel_fpu_begin +147 crypto/morus640_glue.c

   137  
   138  static void crypto_morus640_glue_crypt(struct aead_request *req,
   139 struct morus640_ops ops,
   140 unsigned int cryptlen,
   141 struct morus640_block *tag_xor)
   142  {
   143  struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   144  struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
   145  struct morus640_state state;
   146  
 > 147  kernel_fpu_begin();
   148  
   149  ctx->ops->init(&state, &ctx->key, req->iv);
   150  crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, 
req->assoclen);
   151  crypto_morus640_glue_process_crypt(&state, ops, req);
   152  ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
   153  
 > 154  kernel_fpu_end();
   155  }
   156  
   157  int crypto_morus640_glue_encrypt(struct aead_request *req)
   158  {
   159  struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   160  struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
   161  struct morus640_ops OPS = {
   162  .skcipher_walk_init = skcipher_walk_aead_encrypt,
   163  .crypt_blocks = ctx->ops->enc,
   164  .crypt_tail = ctx->ops->enc_tail,
   165  };
   166  
   167  struct morus640_block tag = {};
   168  unsigned int authsize = crypto_aead_authsize(tfm);
   169  unsigned int cryptlen = req->cryptlen;
   170  
   171  crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag);
   172  
   173  scatterwalk_map_and_copy(tag.bytes, req->dst,
   174   req->assoclen + cryptlen, authsize, 1);
   175  return 0;
   176  }
   177  EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt);
   178

[PATCH] crypto: chtls - fix a missing-check bug

2018-05-18 Thread Wenwen Wang
In do_chtls_setsockopt(), the tls crypto info is first copied from the
pointer 'optval' in userspace and saved to 'tmp_crypto_info'. Then the
'version' of the crypto info is checked. If the version is not as expected,
i.e., TLS_1_2_VERSION, error code -ENOTSUPP is returned to indicate that
the provided crypto info is not supported yet. Then, the 'cipher_type'
field of the 'tmp_crypto_info' is also checked to see if it is
TLS_CIPHER_AES_GCM_128. If it is, the whole struct of
tls12_crypto_info_aes_gcm_128 is copied from the pointer 'optval' and then
the function chtls_setkey() is invoked to set the key.

Given that the 'optval' pointer resides in userspace, a malicious userspace
process can race to change the data pointed by 'optval' between the two
copies. For example, a user can provide a crypto info with TLS_1_2_VERSION
and TLS_CIPHER_AES_GCM_128. After the first copy, the user can modify the
'version' and the 'cipher_type' fields to any versions and/or cipher types
that are not allowed. This way, the user can bypass the checks, inject
bad data to the kernel, cause chtls_setkey() to set a wrong key or other
issues.

This patch reuses the data copied in the first try so as to ensure these
checks will not be bypassed.

Signed-off-by: Wenwen Wang 
---
 drivers/crypto/chelsio/chtls/chtls_main.c | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c 
b/drivers/crypto/chelsio/chtls/chtls_main.c
index 007c45c..39aab05 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -491,9 +491,13 @@ static int do_chtls_setsockopt(struct sock *sk, int 
optname,
 
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
-   rc = copy_from_user(crypto_info, optval,
-   sizeof(struct
-  tls12_crypto_info_aes_gcm_128));
+   /* Obtain version and type from previous copy */
+   crypto_info[0] = tmp_crypto_info;
+   /* Now copy the following data */
+   rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+   optval + sizeof(*crypto_info),
+   sizeof(struct tls12_crypto_info_aes_gcm_128)
+   - sizeof(*crypto_info));
 
if (rc) {
rc = -EFAULT;
-- 
2.7.4



Re: [PATCH 0/4] Add support for MORUS AEAD algorithm

2018-05-18 Thread Herbert Xu
On Fri, May 11, 2018 at 02:19:08PM +0200, Ondrej Mosnáček wrote:
> From: Ondrej Mosnacek 
> 
> This patchset adds the MORUS AEAD algorithm implementation to the Linux 
> Crypto API.
> 
> MORUS [1] is a dedicated AEAD algorithm focused on SIMD instructions and 
> designed for high throughput both on modern processors and in hardware. It is 
> designed by Hongjun Wu and Tao Huang and has been submitted to the CAESAR 
> competition [2], where it is currently one of the finalists [3]. MORUS uses 
> only logical bitwise operations and bitwise rotations as primitives.
> 
> MORUS has two variants:
> * MORUS-640 operating on 128-bit blocks and accepting a 128-bit key.
> * MORUS-1280 operating on 256-bit blocks and accepting a 128- or 256-bit key.
> Both variants accept a 128-bit IV and produce an up to 128-bit tag.
> 
> The patchset contains four patches, adding:
> * generic implementations
> * test vectors to testmgr
> * common glue code for x86_64 optimizations
> * x86_64 SSE2/AVX2 optimized implementations
> 
> Since there are no official test vectors currently available, the test 
> vectors in patch 2 were generated using a reference implementation from 
> public CAESAR benchmarks [4]. They should be replaced/complemented with 
> official test vectors if/when they become available.
> 
> The implementations have been developed in cooperation with Milan Broz (the 
> maintainer of dm-crypt and cryptsetup) and there is a plan to use them for 
> authenticated disk encryption in cryptsetup. They are a result of my Master's 
> thesis at the Faculty of Informatics, Masaryk University, Brno [5].
> 
> [1] https://competitions.cr.yp.to/round3/morusv2.pdf
> [2] https://competitions.cr.yp.to/caesar-call.html
> [3] https://competitions.cr.yp.to/caesar-submissions.html
> [4] https://bench.cr.yp.to/ebaead.html
> [5] https://is.muni.cz/th/409879/fi_m/?lang=en
> 
> Ondrej Mosnacek (4):
>   crypto: Add generic MORUS AEAD implementations
>   crypto: testmgr - Add test vectors for MORUS
>   crypto: Add common SIMD glue code for MORUS
>   crypto: x86 - Add optimized MORUS implementations

All applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH 0/3] Add support for AEGIS AEAD algorithm

2018-05-18 Thread Herbert Xu
On Fri, May 11, 2018 at 02:12:48PM +0200, Ondrej Mosnáček wrote:
> From: Ondrej Mosnacek 
> 
> This patchset adds the AEGIS AEAD algorithm implementation to the Linux 
> Crypto API.
> 
> AEGIS [1] is a dedicated AEAD algorithm based on the AES round function and 
> designed for high throughput both on modern processors and in hardware. It is 
> designed by Hongjun Wu and Bart Preneel and has been submitted to the CAESAR 
> competition [2], where it is currently one of the finalists [3].
> 
> AEGIS uses the AES round function and logical bitwise operations as 
> primitives. It achieves extremely good performance in software (on platforms 
> with HW-accelerated AES round function) and in hardware.
> 
> AEGIS has three variants:
> * AEGIS-128 operating on 128-bit blocks and accepting a 128-bit IV and key.
> * AEGIS-128L operating on pairs of 128-bit blocks and accepting a 128-bit IV 
> and key.
> * AEGIS-256 operating on 128-bit blocks and accepting a 256-bit IV and key.
> All three variants produce an up to 128-bit tag.
> 
> The patchset contains three patches, adding:
> * generic implementations
> * test vectors to testmgr
> * x86_64 AES-NI+SSE2 optimized implementations
> 
> Since there are no official test vectors currently available, the test 
> vectors in patch 2 were generated using a reference implementation from 
> public CAESAR benchmarks [4]. They should be replaced/complemented with 
> official test vectors if/when they become available.
> 
> The implementations have been developed in cooperation with Milan Broz (the 
> maintainer of dm-crypt and cryptsetup) and there is a plan to use them for 
> authenticated disk encryption in cryptsetup. They are a result of my Master's 
> thesis at the Faculty of Informatics, Masaryk University, Brno [5].
> 
> [1] https://competitions.cr.yp.to/round3/aegisv11.pdf
> [2] https://competitions.cr.yp.to/caesar-call.html
> [3] https://competitions.cr.yp.to/caesar-submissions.html
> [4] https://bench.cr.yp.to/ebaead.html
> [5] https://is.muni.cz/th/409879/fi_m/?lang=en
> 
> Ondrej Mosnacek (3):
>   crypto: Add generic AEGIS AEAD implementations
>   crypto: testmgr - Add test vectors for AEGIS
>   crypto: x86 - Add optimized AEGIS implementations

All applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: reorder paes test lexicographically

2018-05-18 Thread Herbert Xu
On Fri, May 11, 2018 at 09:04:06AM +0100, Gilad Ben-Yossef wrote:
> Due to a snafu "paes" testmgr tests were not ordered
> lexicographically, which led to boot time warnings.
> Reorder the tests as needed.
> 
> Fixes: a794d8d ("crypto: ccree - enable support for hardware keys")
> Reported-by: Abdul Haleem 
> Signed-off-by: Gilad Ben-Yossef 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: nx: fix spelling mistake: "seqeunce" -> "sequence"

2018-05-18 Thread Herbert Xu
On Wed, May 09, 2018 at 10:16:36AM +0100, Colin King wrote:
> From: Colin Ian King 
> 
> Trivial fix to spelling mistake in CSB_ERR error message text
> 
> Signed-off-by: Colin Ian King 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] crypto: chelsio: request to HW should wrap

2018-05-18 Thread Herbert Xu
On Thu, May 10, 2018 at 10:14:42AM +0530, Atul Gupta wrote:
> -Tx request and data is copied to HW Q in 64B desc, check for
> end of queue and adjust the current position to start from
> beginning before passing the additional request info.
> -key context copy should check key length only
> -Few reverse christmas tree correction
> 
> Signed-off-by: Atul Gupta 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH] hwrng: n2: fix spelling mistake: "restesting" -> "retesting"

2018-05-18 Thread Herbert Xu
Colin King  wrote:
> From: Colin Ian King 
> 
> Trivial fix to spelling mistake in dev_err error message
> 
> Signed-off-by: Colin Ian King 

Patch applied.  Thanks.
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


[PATCH v2] fscrypt: log the crypto algorithm implementations

2018-05-18 Thread Eric Biggers
Log the crypto algorithm driver name for each fscrypt encryption mode on
its first use, also showing a friendly name for the mode.

This will help people determine whether the expected implementations are
being used.  In some cases we've seen people do benchmarks and reject
using encryption for performance reasons, when in fact they used a much
slower implementation of AES-XTS than was possible on the hardware.  It
can make an enormous difference; e.g., AES-XTS on ARM is about 10x
faster with the crypto extensions (AES instructions) than without.

This also makes it more obvious which modes are being used, now that
fscrypt supports multiple combinations of modes.

Example messages (with default modes, on x86_64):

[   35.492057] fscrypt: AES-256-CTS-CBC using implementation 
"cts(cbc-aes-aesni)"
[   35.492171] fscrypt: AES-256-XTS using implementation "xts-aes-aesni"

Note: algorithms can be dynamically added to the crypto API, which can
result in different implementations being used at different times.  But
this is rare; for most users, showing the first will be good enough.

Signed-off-by: Eric Biggers 
---

Changed since v1:
- Added missing "\n" (oops)

Note: this patch is on top of the other fscrypt patches I've sent out for 4.18.

 fs/crypto/keyinfo.c | 102 +---
 1 file changed, 68 insertions(+), 34 deletions(-)

diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 41f6025d5d7a..e997ca51192f 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -148,44 +148,64 @@ static int find_and_derive_key(const struct inode *inode,
return err;
 }
 
-static const struct {
+static struct fscrypt_mode {
+   const char *friendly_name;
const char *cipher_str;
int keysize;
+   bool logged_impl_name;
 } available_modes[] = {
-   [FS_ENCRYPTION_MODE_AES_256_XTS]  = { "xts(aes)",   64 },
-   [FS_ENCRYPTION_MODE_AES_256_CTS]  = { "cts(cbc(aes))",  32 },
-   [FS_ENCRYPTION_MODE_AES_128_CBC]  = { "cbc(aes)",   16 },
-   [FS_ENCRYPTION_MODE_AES_128_CTS]  = { "cts(cbc(aes))",  16 },
-   [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = { "xts(speck128)",  64 },
-   [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = { "cts(cbc(speck128))", 32 },
+   [FS_ENCRYPTION_MODE_AES_256_XTS] = {
+   .friendly_name = "AES-256-XTS",
+   .cipher_str = "xts(aes)",
+   .keysize = 64,
+   },
+   [FS_ENCRYPTION_MODE_AES_256_CTS] = {
+   .friendly_name = "AES-256-CTS-CBC",
+   .cipher_str = "cts(cbc(aes))",
+   .keysize = 32,
+   },
+   [FS_ENCRYPTION_MODE_AES_128_CBC] = {
+   .friendly_name = "AES-128-CBC",
+   .cipher_str = "cbc(aes)",
+   .keysize = 16,
+   },
+   [FS_ENCRYPTION_MODE_AES_128_CTS] = {
+   .friendly_name = "AES-128-CTS-CBC",
+   .cipher_str = "cts(cbc(aes))",
+   .keysize = 16,
+   },
+   [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
+   .friendly_name = "Speck128/256-XTS",
+   .cipher_str = "xts(speck128)",
+   .keysize = 64,
+   },
+   [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
+   .friendly_name = "Speck128/256-CTS-CBC",
+   .cipher_str = "cts(cbc(speck128))",
+   .keysize = 32,
+   },
 };
 
-static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
-const char **cipher_str_ret, int *keysize_ret)
+static struct fscrypt_mode *
+select_encryption_mode(const struct fscrypt_info *ci, const struct inode 
*inode)
 {
-   u32 mode;
-
if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
fscrypt_warn(inode->i_sb,
 "inode %lu uses unsupported encryption modes 
(contents mode %d, filenames mode %d)",
 inode->i_ino, ci->ci_data_mode,
 ci->ci_filename_mode);
-   return -EINVAL;
+   return ERR_PTR(-EINVAL);
}
 
-   if (S_ISREG(inode->i_mode)) {
-   mode = ci->ci_data_mode;
-   } else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
-   mode = ci->ci_filename_mode;
-   } else {
-   WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info 
for inode %lu, which is not encryptable (file type %d)\n",
- inode->i_ino, (inode->i_mode & S_IFMT));
-   return -EINVAL;
-   }
+   if (S_ISREG(inode->i_mode))
+   return &available_modes[ci->ci_data_mode];
+
+   if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+   return &available_modes[ci->ci_filename_mode];
 
-   *cipher_str_ret = available_modes[mode].cipher_str;
-   *keysize_ret = available_modes[mode].keysize;
-   return 0;
+   WARN_ONCE(1, 

Re: [RFC PATCH 5/5] KEYS: add KPP ecdh parser

2018-05-18 Thread Tudor Ambarus

Hi, Denis,

On 05/14/2018 10:54 PM, Denis Kenzior wrote:

Hi Tudor,

On 02/28/2018 10:52 AM, Tudor Ambarus wrote:

The ECDH private keys are expected to be encoded with the ecdh
helpers from kernel.

Use the ecdh helpers to check if the key is valid. If valid,
allocate a tfm and set the private key. There is a one-to-one
binding between the private key and the tfm. The tfm is allocated
once and used as many times as needed.

ECDH keys can be loaded like this:
 # echo -n 
020028000200200024d121ebe5cf2d83f6621b6e43843aa38be086c32019da92505303e1c0eab882 
\


This part looks a bit scary.  Isn't this translating directly to 
kpp_secret data structure (in looks like little-endian order) followed 
curve_id, etc. >


yes, this is how it works.

If the intent is to extend KPP with regular DH, DH + KDF, etc, then we 
might want to invent a proper format here?  I don't think that a Diffie 
Hellman or ECDH Private Key format was ever invented, similar to how 
PKCS8 is used for RSA.




This can be resolved by falling through kpp decoding types until one
recognizes the format.

Inventing an ASN.1 syntax would be logical but somewhat painful as D-H 
is frequently used with plain old random numbers and certificates are 
not stored on disk...


There was this kind of discussion when kpp was introduced, see:
https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg19599.html

Best,
ta




 | xxd -r -p | keyctl padd asymmetric private @s

Signed-off-by: Tudor Ambarus 
---
  crypto/asymmetric_keys/Kconfig  |   8 +++
  crypto/asymmetric_keys/Makefile |   5 ++
  crypto/asymmetric_keys/kpp_parser.c | 124 


  include/crypto/asym_kpp_subtype.h   |   2 +
  4 files changed, 139 insertions(+)
  create mode 100644 crypto/asymmetric_keys/kpp_parser.c


Regards,
-Denis



Re: [RFC PATCH 1/5] KEYS: Provide key type operations for kpp ops

2018-05-18 Thread Tudor Ambarus

Hi, Denis,

Thanks for the review! Please see inline.

On 05/14/2018 09:48 PM, Denis Kenzior wrote:

Hi Tudor,

On 02/28/2018 10:52 AM, Tudor Ambarus wrote:

Provide three new operations in the key_type struct that can be used to
provide access to kpp operations. These will be implemented for the
asymmetric key type in a later patch and may refer to a key retained in
RAM by the kernel or a key retained in crypto hardware.

 int (*asym_kpp_query)(const struct kernel_kpp_params *params,
   struct kernel_kpp_query *res);
 int (*asym_kpp_gen_pubkey)(struct kernel_kpp_params *params,
   void *out);
 int (*asym_kpp_compute_ss)(struct kernel_kpp_params *params,
const void *in, void *out);

Signed-off-by: Tudor Ambarus 
---
  Documentation/security/keys/core.rst | 54 


  include/linux/key-type.h |  7 +
  include/linux/keyctl.h   | 11 
  include/uapi/linux/keyctl.h  |  3 ++
  4 files changed, 75 insertions(+)

diff --git a/Documentation/security/keys/core.rst 
b/Documentation/security/keys/core.rst

index d224fd0..9b69a1f 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -1688,6 +1688,60 @@ The structure has a number of fields, some of 
which are mandatory:
   If successful, 0 will be returned.  If the key doesn't support 
this,

   EOPNOTSUPP will be returned.
+  *  ``int (*asym_kpp_gen_pubkey)(struct kernel_kpp_params *params, 
void *out);``
+  *  ``int (*asym_kpp_compute_ss)(struct kernel_kpp_params *params, 
const void *in, void *out);``

+
+ These methods are optional. If provided the first allows to 
generate the
+ public key pair corresponding to the private key. The second 
method allows
+ to generate a shared secret by  combining the private key and 
the other

+ party's public key.
+
+ In all cases, the following information is provided in the 
params block::

+
+   struct kernel_kpp_query {
+   struct key  *key;
+   __u32   in_len; /* Input data size */
+   __u32   out_len;/* Output buffer size */
+   }
+


Probably not a huge deal as most common key sizes are already supported, 
but... is there a way to query supported key sizes?  I think for 
DH_COMPUTE we didn't have this problem as everything was done in 
software.  However, if the intent is to use TPM / other hardware engines 
we might need a way to query supported key sizes.


Oh, there's a typo here, this structure should have been named
'kernel_kpp_params' and not 'kernel_kpp_query'. 'kernel_kpp_query' is
defined below, it provides a way for determining the maximum supported
key size.



+ This includes the key to be used and the sizes in bytes of the 
input and

+ output buffers.
+
+ For a given operation, the in and out buffers are used as follows::
+
+   Operation IDin,in_lenout,out_len
+   === ===  

+   KEYCTL_KPP_GEN_PUBKEY   -Corresponding 
public key

+   KEYCTL_KPP_COMPUTE_SS   Pair's public keyShared Secret
+
+ If successful, the public key generation and the shared secret 
computation

+ will return the amount of data written into the output buffer.
+
+  *  ``int (*asym_kpp_query)(const struct kernel_kpp_params *params, 
struct kernel_kpp_query *res);``

+
+ This method is optional. If provided it allows information about 
the

+ asymmetric KPP (Key Protocol Primitives) key held in the key to be
+ determined.
+
+ The ``params`` block will contain the key to be queried. 
``in_len`` and

+ ``out_len`` are unused.
+
+ If successful, the following information is filled in::
+
+   struct kernel_kpp_query {
+   __u32   supported_ops;  /* Which ops are 
supported */
+   __u32   max_size;   /* Maximum size of the 
output buffer */

+   };
+
+ The supported_ops field will contain a bitmask indicating what 
operations
+ are supported by the key, including public key generation and 
shared

+ secret computation. The following constants are defined for this::
+
+   KEYCTL_SUPPORTS_{GEN_PUBKEY, COMPUTE_SS};
+
+ If successful, 0 is returned.  If the key is not an asymmetric 
kpp key,

+ EOPNOTSUPP is returned.
+
  Request-Key Callback Service
  
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index bc9af55..d354b6b 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -19,6 +19,8 @@
  struct kernel_pkey_query;
  struct kernel_pkey_params;
+struct kernel_kpp_query;
+struct kernel_kpp_params;
  /*
   * key under-construction record
@@ -165,6 +167,11 @@ struct key_type {
 const void *in, void *out);
  int (*asym_verif

Re: [PATCH 3/3] arm64: dts: renesas: r8a7795: add ccree binding

2018-05-18 Thread Simon Horman
On Thu, May 17, 2018 at 04:12:23PM +0300, Gilad Ben-Yossef wrote:
> On Thu, May 17, 2018 at 12:04 PM, Simon Horman  wrote:
> > On Thu, May 17, 2018 at 11:01:57AM +0300, Gilad Ben-Yossef wrote:
> >> On Wed, May 16, 2018 at 10:43 AM, Simon Horman  wrote:
> >> > On Tue, May 15, 2018 at 04:50:44PM +0200, Geert Uytterhoeven wrote:
> >> >> Hi Gilad,
> >> >>
> >> >> On Tue, May 15, 2018 at 2:29 PM, Gilad Ben-Yossef  
> >> >> wrote:
> >> >> > Add bindings for CryptoCell instance in the SoC.
> >> >> >
> >> >> > Signed-off-by: Gilad Ben-Yossef 
> >> >>
> >> >> Thanks for your patch!
> >> >>
> >> >> > --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
> >> >> > +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
> >> >> > @@ -528,6 +528,14 @@
> >> >> > status = "disabled";
> >> >> > };
> >> >> >
> >> >> > +   arm_cc630p: crypto@e6601000 {
> >> >> > +   compatible = "arm,cryptocell-630p-ree";
> >> >> > +   interrupts = ;
> >> >> > +   #interrupt-cells = <2>;
> >> >>
> >> >> I believe the #interrupt-cells property is not needed.
> >> >>
> >> >> > +   reg = <0x0 0xe6601000 0 0x1000>;
> >> >> > +   clocks = <&cpg CPG_MOD 229>;
> >> >> > +   };
> >> >>
> >> >> The rest looks good, but I cannot verify the register block.
> >> >>
> >> >> > +
> >> >> > i2c3: i2c@e66d {
> >> >> > #address-cells = <1>;
> >> >> > #size-cells = <0>;
> >> >
> >> > Thanks, I have applied this after dropping the #interrupt-cells property.
> >>
> >> Thanks you!
> >>
> >> Alas, it will not work without the clk patch (the previous one in the
> >> series) so they need to be
> >> taken or dropped together.
> >
> > I think its fine if it does not yet work.
> > But not if its causes things that previously worked to stop working.
> 
> Based on the further discussion with Geert my recommendation is to
> drop my patch for now,
> take Geert CR clock  patch and I will follow up next week with a v2
> that fixes the clock
> handing as discussed with Geert.

Thanks, I will drop the patch.


[PATCH v6 07/28] x86/asm/crypto: annotate local functions

2018-05-18 Thread Jiri Slaby
Use the newly added SYM_FUNC_START_LOCAL to annotate starts of all
functions which do not have ".globl" annotation, but their ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S| 49 
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 20 ++--
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++--
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S|  8 ++---
 arch/x86/crypto/ghash-clmulni-intel_asm.S|  4 +--
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S  |  8 ++---
 arch/x86/crypto/serpent-avx2-asm_64.S|  8 ++---
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S  |  8 ++---
 9 files changed, 62 insertions(+), 71 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index e762ef417562..b482ac1a1fb3 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1763,7 +1763,7 @@ ENDPROC(aesni_gcm_finalize)
 
 .align 4
 _key_expansion_128:
-_key_expansion_256a:
+SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1774,10 +1774,9 @@ _key_expansion_256a:
add $0x10, TKEYP
ret
 ENDPROC(_key_expansion_128)
-ENDPROC(_key_expansion_256a)
+SYM_FUNC_END(_key_expansion_256a)
 
-.align 4
-_key_expansion_192a:
+SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1799,10 +1798,9 @@ _key_expansion_192a:
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
-ENDPROC(_key_expansion_192a)
+SYM_FUNC_END(_key_expansion_192a)
 
-.align 4
-_key_expansion_192b:
+SYM_FUNC_START_LOCAL(_key_expansion_192b)
pshufd $0b01010101, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
pxor %xmm4, %xmm0
@@ -1819,10 +1817,9 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_192b)
+SYM_FUNC_END(_key_expansion_192b)
 
-.align 4
-_key_expansion_256b:
+SYM_FUNC_START_LOCAL(_key_expansion_256b)
pshufd $0b10101010, %xmm1, %xmm1
shufps $0b00010000, %xmm2, %xmm4
pxor %xmm4, %xmm2
@@ -1832,7 +1829,7 @@ _key_expansion_256b:
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_256b)
+SYM_FUNC_END(_key_expansion_256b)
 
 /*
  * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -1985,8 +1982,7 @@ ENDPROC(aesni_enc)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc1:
+SYM_FUNC_START_LOCAL(_aesni_enc1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2029,7 +2025,7 @@ _aesni_enc1:
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
-ENDPROC(_aesni_enc1)
+SYM_FUNC_END(_aesni_enc1)
 
 /*
  * _aesni_enc4:internal ABI
@@ -2049,8 +2045,7 @@ ENDPROC(_aesni_enc1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_enc4:
+SYM_FUNC_START_LOCAL(_aesni_enc4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2138,7 +2133,7 @@ _aesni_enc4:
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
-ENDPROC(_aesni_enc4)
+SYM_FUNC_END(_aesni_enc4)
 
 /*
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
@@ -2177,8 +2172,7 @@ ENDPROC(aesni_dec)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec1:
+SYM_FUNC_START_LOCAL(_aesni_dec1)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE # round 0
@@ -2221,7 +2215,7 @@ _aesni_dec1:
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
-ENDPROC(_aesni_dec1)
+SYM_FUNC_END(_aesni_dec1)
 
 /*
  * _aesni_dec4:internal ABI
@@ -2241,8 +2235,7 @@ ENDPROC(_aesni_dec1)
  * KEY
  * TKEYP (T1)
  */
-.align 4
-_aesni_dec4:
+SYM_FUNC_START_LOCAL(_aesni_dec4)
movaps (KEYP), KEY  # key
mov KEYP, TKEYP
pxor KEY, STATE1# round 0
@@ -2330,7 +2323,7 @@ _aesni_dec4:
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2608,8 +2601,7 @@ ENDPROC(aesni_cbc_dec)
  * INC:== 1, in little endian
  * BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
PSHUFB_XMM BSWAP_MASK CTR

[PATCH v6 09/28] x86/asm: annotate aliases

2018-05-18 Thread Jiri Slaby
_key_expansion_128 is an alias to _key_expansion_256a, __memcpy to
memcpy, xen_syscall32_target to xen_sysenter_target, and so on. Annotate
them all using the new SYM_FUNC_START_ALIAS, SYM_FUNC_START_LOCAL_ALIAS,
and SYM_FUNC_END_ALIAS. This will make the tools generating the
debuginfo happy.

Signed-off-by: Jiri Slaby 
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Reviewed-by: Juergen Gross  [xen parts]
Cc: 
Cc: 
---
 arch/x86/crypto/aesni-intel_asm.S | 5 ++---
 arch/x86/lib/memcpy_64.S  | 4 ++--
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 4 ++--
 arch/x86/xen/xen-asm_64.S | 4 ++--
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S 
b/arch/x86/crypto/aesni-intel_asm.S
index b482ac1a1fb3..c85ecb163c78 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1761,8 +1761,7 @@ ENDPROC(aesni_gcm_finalize)
 #endif
 
 
-.align 4
-_key_expansion_128:
+SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128)
 SYM_FUNC_START_LOCAL(_key_expansion_256a)
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm4
@@ -1773,8 +1772,8 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
-ENDPROC(_key_expansion_128)
 SYM_FUNC_END(_key_expansion_256a)
+SYM_FUNC_END_ALIAS(_key_expansion_128)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192a)
pshufd $0b01010101, %xmm1, %xmm1
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..4911b1c61aa8 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -26,7 +26,7 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
 ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
  "jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,7 +40,7 @@ ENTRY(memcpy)
rep movsb
ret
 ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..50c1648311b3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -26,7 +26,7 @@
  */
 .weak memmove
 
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(memmove)
 ENTRY(__memmove)
 
/* Handle more 32 bytes in loop */
@@ -208,6 +208,6 @@ ENTRY(__memmove)
 13:
retq
 ENDPROC(__memmove)
-ENDPROC(memmove)
+SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..927ac44d34aa 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -19,7 +19,7 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(memset)
 ENTRY(__memset)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -43,8 +43,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
 ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339e5c8e..e8f6f482bb20 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -164,13 +164,13 @@ ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
lea 16(%rsp), %rsp  /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif /* CONFIG_IA32_EMULATION */
-- 
2.16.3



[PATCH v6 24/28] x86_64/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-18 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROC's by
SYM_FUNC_END.

And make sure ENTRY/ENDPROC is not defined on X86_64, given these were
the last users.

Signed-off-by: Jiri Slaby 
Reviewed-by: Rafael J. Wysocki  [hibernate]
Reviewed-by: Boris Ostrovsky  [xen bits]
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: "Rafael J. Wysocki" 
Cc: Len Brown 
Cc: Pavel Machek 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: Boris Ostrovsky 
Cc: Juergen Gross 
Cc: linux-crypto@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
---
 arch/x86/boot/compressed/efi_thunk_64.S|  4 +-
 arch/x86/boot/compressed/head_64.S | 16 +++---
 arch/x86/boot/compressed/mem_encrypt.S |  8 +--
 arch/x86/crypto/aes-i586-asm_32.S  |  8 +--
 arch/x86/crypto/aes-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/aes_ctrby8_avx-x86_64.S| 12 ++---
 arch/x86/crypto/aesni-intel_asm.S  | 60 +++---
 arch/x86/crypto/aesni-intel_avx-x86_64.S   | 24 -
 arch/x86/crypto/blowfish-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/camellia-aesni-avx-asm_64.S| 24 -
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S   | 24 -
 arch/x86/crypto/camellia-x86_64-asm_64.S   | 16 +++---
 arch/x86/crypto/cast5-avx-x86_64-asm_64.S  | 16 +++---
 arch/x86/crypto/cast6-avx-x86_64-asm_64.S  | 24 -
 arch/x86/crypto/chacha20-avx2-x86_64.S |  4 +-
 arch/x86/crypto/chacha20-ssse3-x86_64.S|  8 +--
 arch/x86/crypto/crc32-pclmul_asm.S |  4 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |  4 +-
 arch/x86/crypto/crct10dif-pcl-asm_64.S |  4 +-
 arch/x86/crypto/des3_ede-asm_64.S  |  8 +--
 arch/x86/crypto/ghash-clmulni-intel_asm.S  |  8 +--
 arch/x86/crypto/poly1305-avx2-x86_64.S |  4 +-
 arch/x86/crypto/poly1305-sse2-x86_64.S |  8 +--
 arch/x86/crypto/salsa20-x86_64-asm_64.S|  4 +-
 arch/x86/crypto/serpent-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/serpent-avx2-asm_64.S  | 24 -
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S   |  8 +--
 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  |  4 +-
 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S |  4 +-
 arch/x86/crypto/sha1_avx2_x86_64_asm.S |  4 +-
 arch/x86/crypto/sha1_ni_asm.S  |  4 +-
 arch/x86/crypto/sha1_ssse3_asm.S   |  4 +-
 arch/x86/crypto/sha256-avx-asm.S   |  4 +-
 arch/x86/crypto/sha256-avx2-asm.S  |  4 +-
 .../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S |  4 +-
 arch/x86/crypto/sha256-ssse3-asm.S |  4 +-
 arch/x86/crypto/sha256_ni_asm.S|  4 +-
 arch/x86/crypto/sha512-avx-asm.S   |  4 +-
 arch/x86/crypto/sha512-avx2-asm.S  |  4 +-
 .../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S|  8 +--
 .../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S   |  4 +-
 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S |  4 +-
 arch/x86/crypto/sha512-ssse3-asm.S |  4 +-
 arch/x86/crypto/twofish-avx-x86_64-asm_64.S| 24 -
 arch/x86/crypto/twofish-x86_64-asm_64-3way.S   |  8 +--
 arch/x86/crypto/twofish-x86_64-asm_64.S|  8 +--
 arch/x86/entry/entry_64.S  | 10 ++--
 arch/x86/entry/entry_64_compat.S   |  4 +-
 arch/x86/kernel/acpi/wakeup_64.S   |  8 +--
 arch/x86/kernel/ftrace_64.S| 20 
 arch/x86/kernel/head_64.S  | 12 ++---
 arch/x86/lib/checksum_32.S |  8 +--
 arch/x86/lib/clear_page_64.S   | 12 ++---
 arch/x86/lib/cmpxchg16b_emu.S  |  4 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  4 +-
 arch/x86/lib/copy_page_64.S|  4 +-
 arch/x86/lib/copy_user_64.S| 16 +++---
 arch/x86/lib/csum-copy_64.S|  4 +-
 arch/x86/lib/getuser.S | 16 +++---
 arch/x86/lib/hweight.S |  8 +--
 arch/x86/lib/iomap_copy_64.S   |  4 +-
 arch/x86/lib/memcpy_64.S   |  4 +-
 arch/x86/lib/memmove_64.S  |  4 +-
 arch/x86/lib/memset_64.S   |  4 +-
 arch/x86/lib/msr-reg.S |  8 +--
 arch/x86/lib/putuser.S

[PATCH v6 27/28] x86_32/asm: change all ENTRY+ENDPROC to SYM_FUNC_*

2018-05-18 Thread Jiri Slaby
These are all functions which are invoked from elsewhere, so we annotate
them as global using the new SYM_FUNC_START. And their ENDPROC's by
SYM_FUNC_END.

Now, we can finally force ENTRY/ENDPROC to be undefined on X86.

Signed-off-by: Jiri Slaby 
Cc: "H. Peter Anvin" 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: x...@kernel.org
Cc: Herbert Xu 
Cc: "David S. Miller" 
Cc: Bill Metzenthen 
Cc: Matt Fleming 
Cc: Ard Biesheuvel 
Cc: linux-crypto@vger.kernel.org
Cc: linux-...@vger.kernel.org
---
 arch/x86/boot/compressed/efi_stub_32.S |  4 ++--
 arch/x86/boot/compressed/head_32.S | 12 +--
 arch/x86/crypto/salsa20-i586-asm_32.S  |  4 ++--
 arch/x86/crypto/serpent-sse2-i586-asm_32.S |  8 
 arch/x86/crypto/twofish-i586-asm_32.S  |  8 
 arch/x86/entry/entry_32.S  | 24 +++---
 arch/x86/kernel/head_32.S  | 16 +++
 arch/x86/lib/atomic64_386_32.S |  4 ++--
 arch/x86/lib/atomic64_cx8_32.S | 32 +++---
 arch/x86/lib/checksum_32.S |  8 
 arch/x86/math-emu/div_Xsig.S   |  4 ++--
 arch/x86/math-emu/div_small.S  |  4 ++--
 arch/x86/math-emu/mul_Xsig.S   | 12 +--
 arch/x86/math-emu/polynom_Xsig.S   |  4 ++--
 arch/x86/math-emu/reg_norm.S   |  8 
 arch/x86/math-emu/reg_round.S  |  4 ++--
 arch/x86/math-emu/reg_u_add.S  |  4 ++--
 arch/x86/math-emu/reg_u_div.S  |  4 ++--
 arch/x86/math-emu/reg_u_mul.S  |  4 ++--
 arch/x86/math-emu/reg_u_sub.S  |  4 ++--
 arch/x86/math-emu/round_Xsig.S |  8 
 arch/x86/math-emu/shr_Xsig.S   |  4 ++--
 arch/x86/math-emu/wm_shrx.S|  8 
 arch/x86/math-emu/wm_sqrt.S|  4 ++--
 arch/x86/platform/efi/efi_stub_32.S|  4 ++--
 include/linux/linkage.h|  8 +++-
 26 files changed, 103 insertions(+), 105 deletions(-)

diff --git a/arch/x86/boot/compressed/efi_stub_32.S 
b/arch/x86/boot/compressed/efi_stub_32.S
index 257e341fd2c8..ed6c351d34ed 100644
--- a/arch/x86/boot/compressed/efi_stub_32.S
+++ b/arch/x86/boot/compressed/efi_stub_32.S
@@ -24,7 +24,7 @@
  */
 
 .text
-ENTRY(efi_call_phys)
+SYM_FUNC_START(efi_call_phys)
/*
 * 0. The function can only be called in Linux kernel. So CS has been
 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movlsaved_return_addr(%edx), %ecx
pushl   %ecx
ret
-ENDPROC(efi_call_phys)
+SYM_FUNC_END(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/boot/compressed/head_32.S 
b/arch/x86/boot/compressed/head_32.S
index 7e8ab0bb6968..3fa36496af12 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,7 +61,7 @@
.hidden _egot
 
__HEAD
-ENTRY(startup_32)
+SYM_FUNC_START(startup_32)
cld
/*
 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -142,14 +142,14 @@ ENTRY(startup_32)
  */
lealrelocated(%ebx), %eax
jmp *%eax
-ENDPROC(startup_32)
+SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
 /*
  * We don't need the return address, so set up the stack so efi_main() can find
  * its arguments.
  */
-ENTRY(efi_pe_entry)
+SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
 
call1f
@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl   %eax
pushl   %ecx
jmp 2f  /* Skip efi_config initialization */
-ENDPROC(efi_pe_entry)
+SYM_FUNC_END(efi_pe_entry)
 
-ENTRY(efi32_stub_entry)
+SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
popl%ecx
popl%edx
@@ -205,7 +205,7 @@ fail:
movlBP_code32_start(%esi), %eax
lealstartup_32(%eax), %eax
jmp *%eax
-ENDPROC(efi32_stub_entry)
+SYM_FUNC_END(efi32_stub_entry)
 #endif
 
.text
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S 
b/arch/x86/crypto/salsa20-i586-asm_32.S
index 6014b7b9e52a..edeb4c3e7389 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -8,7 +8,7 @@
 .text
 
 # enter salsa20_encrypt_bytes
-ENTRY(salsa20_encrypt_bytes)
+SYM_FUNC_START(salsa20_encrypt_bytes)
mov %esp,%eax
and $31,%eax
add $256,%eax
@@ -935,4 +935,4 @@ ENTRY(salsa20_encrypt_bytes)
add $64,%esi
# goto bytesatleast1
jmp ._bytesatleast1
-ENDPROC(salsa20_encrypt_bytes)
+SYM_FUNC_END(salsa20_encrypt_bytes)
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S 
b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index d348f1553a79..f3cebd3c6739 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -512,7 +512,7 @@
pxor t0,x3; \
movdqu x3,

cryptomgr_test / drbg_ctr: BUG: sleeping function called from invalid context

2018-05-18 Thread Geert Uytterhoeven
Hi,

After enabling CONFIG_CRYPTO_DRBG_CTR, I start seeing during kernel boot:

BUG: sleeping function called from invalid context at
include/crypto/algapi.h:416
in_atomic(): 1, irqs_disabled(): 0, pid: 203, name: cryptomgr_test
1 lock held by cryptomgr_test/203:
 #0: (ptrval) (&drbg->drbg_mutex){+.+.}, at: drbg_kcapi_seed+0x128/0x4bc
CPU: 3 PID: 203 Comm: cryptomgr_test Not tainted
4.17.0-rc5-salvator-x-00509-g0ad2b9f404d6a668-dirty #1742
Hardware name: Renesas Salvator-X 2nd version board based on r8a7795 ES2.0+ (DT)
Call trace:
 dump_backtrace+0x0/0x140
 show_stack+0x14/0x1c
 dump_stack+0xb4/0xf0
 ___might_sleep+0x1fc/0x218
 skcipher_walk_done+0x2c8/0x38c
 ctr_encrypt+0x84/0x110
 simd_skcipher_encrypt+0xa4/0xb0
 drbg_kcapi_sym_ctr+0xb4/0x178
 drbg_ctr_update+0x17c/0x2c4
 drbg_seed+0x20c/0x26c
 drbg_kcapi_seed+0x458/0x4bc
 crypto_rng_reset+0x84/0xa8
 alg_test_drbg+0x12c/0x324
 alg_test.part.7+0x264/0x2bc
 alg_test+0x44/0x58
 cryptomgr_test+0x28/0x48
 kthread+0x11c/0x124
 ret_from_fork+0x10/0x18

I tried following the code path, but couldn't find where it went wrong.

mutex_lock(&drbg->drbg_mutex) is called from drbg_instantiate(), which is
inlined by the compiler into drbg_kcapi_seed().

Do you have a clue?
Thanks!

Gr{oetje,eeting}s,

Geert

-- 
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- ge...@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds