Re: [PATCH v36 14/24] x86/sgx: Add SGX_IOC_ENCLAVE_INIT

2020-08-06 Thread Darren Kenny
On Thursday, 2020-07-16 at 16:52:53 +03, Jarkko Sakkinen wrote:
> Add an ioctl that performs ENCLS[EINIT], which locks down the measurement
> and initializes the enclave for entrance. After this, new pages can no
> longer be added.
>
> Acked-by: Jethro Beekman 
> Tested-by: Jethro Beekman 
> Tested-by: Haitao Huang 
> Tested-by: Chunyang Hui 
> Tested-by: Jordan Hand 
> Tested-by: Nathaniel McCallum 
> Tested-by: Seth Moore 

Tested-by: Darren Kenny 
Reviewed-by: Darren Kenny 

> Co-developed-by: Sean Christopherson 
> Signed-off-by: Sean Christopherson 
> Co-developed-by: Suresh Siddha 
> Signed-off-by: Suresh Siddha 
> Signed-off-by: Jarkko Sakkinen 
> ---
>  arch/x86/include/uapi/asm/sgx.h |  11 ++
>  arch/x86/kernel/cpu/sgx/ioctl.c | 188 
>  2 files changed, 199 insertions(+)
>
> diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
> index c8f199b3fb6f..5edb08ab8fd0 100644
> --- a/arch/x86/include/uapi/asm/sgx.h
> +++ b/arch/x86/include/uapi/asm/sgx.h
> @@ -23,6 +23,8 @@ enum sgx_page_flags {
>   _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
>  #define SGX_IOC_ENCLAVE_ADD_PAGES \
>   _IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
> +#define SGX_IOC_ENCLAVE_INIT \
> + _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
>  
>  /**
>   * struct sgx_enclave_create - parameter structure for the
> @@ -52,4 +54,13 @@ struct sgx_enclave_add_pages {
>   __u64   count;
>  };
>  
> +/**
> + * struct sgx_enclave_init - parameter structure for the
> + *   %SGX_IOC_ENCLAVE_INIT ioctl
> + * @sigstruct:   address for the SIGSTRUCT data
> + */
> +struct sgx_enclave_init {
> + __u64 sigstruct;
> +};
> +
>  #endif /* _UAPI_ASM_X86_SGX_H */
> diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
> index c63a51362d14..3444de955191 100644
> --- a/arch/x86/kernel/cpu/sgx/ioctl.c
> +++ b/arch/x86/kernel/cpu/sgx/ioctl.c
> @@ -16,6 +16,9 @@
>  #include "encl.h"
>  #include "encls.h"
>  
> +/* A per-cpu cache for the last known values of IA32_SGXLEPUBKEYHASHx MSRs. 
> */
> +static DEFINE_PER_CPU(u64 [4], sgx_lepubkeyhash_cache);
> +
>  static u32 sgx_calc_ssa_frame_size(u32 miscselect, u64 xfrm)
>  {
>   u32 size_max = PAGE_SIZE;
> @@ -485,6 +488,188 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl 
> *encl, void __user *arg)
>   return ret;
>  }
>  
> +static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
> +   void *hash)
> +{
> + SHASH_DESC_ON_STACK(shash, tfm);
> +
> + shash->tfm = tfm;
> +
> + return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
> +}
> +
> +static int sgx_get_key_hash(const void *modulus, void *hash)
> +{
> + struct crypto_shash *tfm;
> + int ret;
> +
> + tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
> + if (IS_ERR(tfm))
> + return PTR_ERR(tfm);
> +
> + ret = __sgx_get_key_hash(tfm, modulus, hash);
> +
> + crypto_free_shash(tfm);
> + return ret;
> +}
> +
> +static void sgx_update_lepubkeyhash_msrs(u64 *lepubkeyhash, bool enforce)
> +{
> + u64 *cache;
> + int i;
> +
> + cache = per_cpu(sgx_lepubkeyhash_cache, smp_processor_id());
> + for (i = 0; i < 4; i++) {
> + if (enforce || (lepubkeyhash[i] != cache[i])) {
> + wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
> + cache[i] = lepubkeyhash[i];
> + }
> + }
> +}
> +
> +static int sgx_einit(struct sgx_sigstruct *sigstruct, void *token,
> +  struct sgx_epc_page *secs, u64 *lepubkeyhash)
> +{
> + int ret;
> +
> + preempt_disable();
> + sgx_update_lepubkeyhash_msrs(lepubkeyhash, false);
> + ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
> + if (ret == SGX_INVALID_EINITTOKEN) {
> + sgx_update_lepubkeyhash_msrs(lepubkeyhash, true);
> + ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
> + }
> + preempt_enable();
> + return ret;
> +}
> +
> +static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct 
> *sigstruct,
> +  void *token)
> +{
> + u64 mrsigner[4];
> + int ret;
> + int i;
> + int j;
> +
> + /* Check that the required attributes have been authorized. */
> + if (encl->secs_attributes & ~encl->allowed_attributes)
> + return -EACCES;
> +
> + ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
> + if (ret)
> + return ret;
> +
> + mutex_lock(&encl->lock);
> +
> + /*
> +  * Periodically, EINIT polls for certain asynchronous events. If such an
> +  * event is detected, it completes with SGX_UNMASKED_EVENT.
> +  */
> + for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
> + for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
> + ret = sgx_einit(sigstruct, token, encl->secs.epc_page,
> + 

[PATCH v36 14/24] x86/sgx: Add SGX_IOC_ENCLAVE_INIT

2020-07-16 Thread Jarkko Sakkinen
Add an ioctl that performs ENCLS[EINIT], which locks down the measurement
and initializes the enclave for entrance. After this, new pages can no
longer be added.

Acked-by: Jethro Beekman 
Tested-by: Jethro Beekman 
Tested-by: Haitao Huang 
Tested-by: Chunyang Hui 
Tested-by: Jordan Hand 
Tested-by: Nathaniel McCallum 
Tested-by: Seth Moore 
Co-developed-by: Sean Christopherson 
Signed-off-by: Sean Christopherson 
Co-developed-by: Suresh Siddha 
Signed-off-by: Suresh Siddha 
Signed-off-by: Jarkko Sakkinen 
---
 arch/x86/include/uapi/asm/sgx.h |  11 ++
 arch/x86/kernel/cpu/sgx/ioctl.c | 188 
 2 files changed, 199 insertions(+)

diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index c8f199b3fb6f..5edb08ab8fd0 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -23,6 +23,8 @@ enum sgx_page_flags {
_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
 #define SGX_IOC_ENCLAVE_ADD_PAGES \
_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
+#define SGX_IOC_ENCLAVE_INIT \
+   _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
 
 /**
  * struct sgx_enclave_create - parameter structure for the
@@ -52,4 +54,13 @@ struct sgx_enclave_add_pages {
__u64   count;
 };
 
+/**
+ * struct sgx_enclave_init - parameter structure for the
+ *   %SGX_IOC_ENCLAVE_INIT ioctl
+ * @sigstruct: address for the SIGSTRUCT data
+ */
+struct sgx_enclave_init {
+   __u64 sigstruct;
+};
+
 #endif /* _UAPI_ASM_X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index c63a51362d14..3444de955191 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -16,6 +16,9 @@
 #include "encl.h"
 #include "encls.h"
 
+/* A per-cpu cache for the last known values of IA32_SGXLEPUBKEYHASHx MSRs. */
+static DEFINE_PER_CPU(u64 [4], sgx_lepubkeyhash_cache);
+
 static u32 sgx_calc_ssa_frame_size(u32 miscselect, u64 xfrm)
 {
u32 size_max = PAGE_SIZE;
@@ -485,6 +488,188 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl 
*encl, void __user *arg)
return ret;
 }
 
+static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
+ void *hash)
+{
+   SHASH_DESC_ON_STACK(shash, tfm);
+
+   shash->tfm = tfm;
+
+   return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
+}
+
+static int sgx_get_key_hash(const void *modulus, void *hash)
+{
+   struct crypto_shash *tfm;
+   int ret;
+
+   tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
+   if (IS_ERR(tfm))
+   return PTR_ERR(tfm);
+
+   ret = __sgx_get_key_hash(tfm, modulus, hash);
+
+   crypto_free_shash(tfm);
+   return ret;
+}
+
+static void sgx_update_lepubkeyhash_msrs(u64 *lepubkeyhash, bool enforce)
+{
+   u64 *cache;
+   int i;
+
+   cache = per_cpu(sgx_lepubkeyhash_cache, smp_processor_id());
+   for (i = 0; i < 4; i++) {
+   if (enforce || (lepubkeyhash[i] != cache[i])) {
+   wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+   cache[i] = lepubkeyhash[i];
+   }
+   }
+}
+
+static int sgx_einit(struct sgx_sigstruct *sigstruct, void *token,
+struct sgx_epc_page *secs, u64 *lepubkeyhash)
+{
+   int ret;
+
+   preempt_disable();
+   sgx_update_lepubkeyhash_msrs(lepubkeyhash, false);
+   ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
+   if (ret == SGX_INVALID_EINITTOKEN) {
+   sgx_update_lepubkeyhash_msrs(lepubkeyhash, true);
+   ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
+   }
+   preempt_enable();
+   return ret;
+}
+
+static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct 
*sigstruct,
+void *token)
+{
+   u64 mrsigner[4];
+   int ret;
+   int i;
+   int j;
+
+   /* Check that the required attributes have been authorized. */
+   if (encl->secs_attributes & ~encl->allowed_attributes)
+   return -EACCES;
+
+   ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
+   if (ret)
+   return ret;
+
+   mutex_lock(&encl->lock);
+
+   /*
+* Periodically, EINIT polls for certain asynchronous events. If such an
+* event is detected, it completes with SGX_UNMASKED_EVENT.
+*/
+   for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
+   for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
+   ret = sgx_einit(sigstruct, token, encl->secs.epc_page,
+   mrsigner);
+   if (ret == SGX_UNMASKED_EVENT)
+   continue;
+   else
+   break;
+   }
+
+   if (ret != SGX_UNMASKED_EVENT)
+   break;
+
+