Add an ioctl that performs ENCLS[EINIT], which locks down the measurement
and initializes the enclave for entrance. After this, new pages can no
longer be added.

Acked-by: Jethro Beekman <jet...@fortanix.com>
Tested-by: Jethro Beekman <jet...@fortanix.com>
Tested-by: Haitao Huang <haitao.hu...@linux.intel.com>
Tested-by: Chunyang Hui <sanqian....@antfin.com>
Tested-by: Jordan Hand <jorh...@linux.microsoft.com>
Tested-by: Nathaniel McCallum <npmccal...@redhat.com>
Tested-by: Seth Moore <set...@google.com>
Tested-by: Darren Kenny <darren.ke...@oracle.com>
Reviewed-by: Darren Kenny <darren.ke...@oracle.com>
Co-developed-by: Sean Christopherson <sean.j.christopher...@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
Co-developed-by: Suresh Siddha <suresh.b.sid...@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.sid...@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakki...@linux.intel.com>
---
 arch/x86/include/uapi/asm/sgx.h |  11 ++
 arch/x86/kernel/cpu/sgx/encl.h  |   2 +
 arch/x86/kernel/cpu/sgx/ioctl.c | 193 ++++++++++++++++++++++++++++++++
 3 files changed, 206 insertions(+)

diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index 10cd48d06318..e401fa72eaab 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -23,6 +23,8 @@ enum sgx_page_flags {
        _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
 #define SGX_IOC_ENCLAVE_ADD_PAGES \
        _IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
+#define SGX_IOC_ENCLAVE_INIT \
+       _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
 
 /**
  * struct sgx_enclave_create - parameter structure for the
@@ -52,4 +54,13 @@ struct sgx_enclave_add_pages {
        __u64 count;
 };
 
+/**
+ * struct sgx_enclave_init - parameter structure for the
+ *                           %SGX_IOC_ENCLAVE_INIT ioctl
+ * @sigstruct: address for the SIGSTRUCT data
+ */
+struct sgx_enclave_init {
+       __u64 sigstruct;
+};
+
 #endif /* _UAPI_ASM_X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 8ff445476657..0448d22d3010 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -70,6 +70,8 @@ struct sgx_encl {
        struct xarray page_array;
        struct sgx_encl_page secs;
        cpumask_t cpumask;
+       unsigned long attributes;
+       unsigned long attributes_mask;
 };
 
 extern const struct vm_operations_struct sgx_vm_ops;
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index e13e04737683..cf5a43d6daa2 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -128,6 +128,9 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
        encl->base = secs->base;
        encl->size = secs->size;
        encl->ssaframesize = secs->ssa_frame_size;
+       encl->attributes = secs->attributes;
+       encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
+                               SGX_ATTR_KSS;
 
        /*
         * Set SGX_ENCL_CREATED only after the enclave is fully prepped.  This
@@ -490,6 +493,193 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
        return ret;
 }
 
+static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
+                             void *hash)
+{
+       SHASH_DESC_ON_STACK(shash, tfm);
+
+       shash->tfm = tfm;
+
+       return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
+}
+
+static int sgx_get_key_hash(const void *modulus, void *hash)
+{
+       struct crypto_shash *tfm;
+       int ret;
+
+       tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       ret = __sgx_get_key_hash(tfm, modulus, hash);
+
+       crypto_free_shash(tfm);
+       return ret;
+}
+
+static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
+                        void *token)
+{
+       u64 mrsigner[4];
+       void *addr;
+       int ret;
+       int i;
+       int j;
+
+       /*
+        * Deny initializing enclaves with attributes (namely provisioning)
+        * that have not been explicitly allowed.
+        */
+       if (encl->attributes & ~encl->attributes_mask)
+               return -EACCES;
+
+       /*
+        * Attributes should not be enforced *only* against what's available on
+        * platform (done in sgx_encl_create) but checked and enforced against
+        * the mask for enforcement in sigstruct. For example an enclave could
+        * opt to sign with AVX bit in xfrm, but still be loadable on a platform
+        * without it if the sigstruct->body.attributes_mask does not turn that
+        * bit on.
+        */
+       if (sigstruct->body.attributes & sigstruct->body.attributes_mask &
+           sgx_attributes_reserved_mask)
+               return -EINVAL;
+
+       if (sigstruct->body.miscselect & sigstruct->body.misc_mask &
+           sgx_misc_reserved_mask)
+               return -EINVAL;
+
+       if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
+           sgx_xfrm_reserved_mask)
+               return -EINVAL;
+
+       ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
+       if (ret)
+               return ret;
+
+       mutex_lock(&encl->lock);
+
+       /*
+        * ENCLS[EINIT] is interruptible because it has such a high latency,
+        * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending,
+        * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be
+        * serviced.
+        */
+       for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
+               for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
+                       addr = sgx_get_epc_addr(encl->secs.epc_page);
+
+                       preempt_disable();
+
+                       wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0, mrsigner[0]);
+                       wrmsrl(MSR_IA32_SGXLEPUBKEYHASH1, mrsigner[1]);
+                       wrmsrl(MSR_IA32_SGXLEPUBKEYHASH2, mrsigner[2]);
+                       wrmsrl(MSR_IA32_SGXLEPUBKEYHASH3, mrsigner[3]);
+
+                       ret = __einit(sigstruct, token, addr);
+
+                       preempt_enable();
+
+                       if (ret == SGX_UNMASKED_EVENT)
+                               continue;
+                       else
+                               break;
+               }
+
+               if (ret != SGX_UNMASKED_EVENT)
+                       break;
+
+               msleep_interruptible(SGX_EINIT_SLEEP_TIME);
+
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       goto err_out;
+               }
+       }
+
+       if (ret & ENCLS_FAULT_FLAG) {
+               if (encls_failed(ret))
+                       ENCLS_WARN(ret, "EINIT");
+
+               sgx_encl_destroy(encl);
+               ret = -EFAULT;
+       } else if (ret) {
+               pr_debug("EINIT returned %d\n", ret);
+               ret = -EPERM;
+       } else {
+               atomic_or(SGX_ENCL_INITIALIZED, &encl->flags);
+       }
+
+err_out:
+       mutex_unlock(&encl->lock);
+       return ret;
+}
+
+/**
+ * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
+ *
+ * @encl:      an enclave pointer
+ * @arg:       userspace pointer to a struct sgx_enclave_init instance
+ *
+ * Flush any outstanding enqueued EADD operations and perform EINIT.  The
+ * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
+ * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
+ *
+ * Return:
+ *   0 on success,
+ *   -EPERM on EINIT failure,
+ *   -errno otherwise
+ */
+static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
+{
+       struct sgx_sigstruct *sigstruct;
+       struct sgx_enclave_init einit;
+       struct page *initp_page;
+       void *token;
+       int ret;
+
+       if ((atomic_read(&encl->flags) & SGX_ENCL_INITIALIZED) ||
+           !(atomic_read(&encl->flags) & SGX_ENCL_CREATED))
+               return -EINVAL;
+
+       if (copy_from_user(&einit, arg, sizeof(einit)))
+               return -EFAULT;
+
+       initp_page = alloc_page(GFP_KERNEL);
+       if (!initp_page)
+               return -ENOMEM;
+
+       sigstruct = kmap(initp_page);
+       token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
+       memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
+
+       if (copy_from_user(sigstruct, (void __user *)einit.sigstruct,
+                          sizeof(*sigstruct))) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       /*
+        * A legacy field used with Intel signed enclaves. These used to mean
+        * regular and architectural enclaves. The CPU only accepts these values
+        * but they do not have any other meaning.
+        *
+        * Thus, reject any other values.
+        */
+       if (sigstruct->header.vendor != 0x0000 &&
+           sigstruct->header.vendor != 0x8086) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = sgx_encl_init(encl, sigstruct, token);
+
+out:
+       kunmap(initp_page);
+       __free_page(initp_page);
+       return ret;
+}
+
 long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
        struct sgx_encl *encl = filep->private_data;
@@ -511,6 +701,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
        case SGX_IOC_ENCLAVE_ADD_PAGES:
                ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
                break;
+       case SGX_IOC_ENCLAVE_INIT:
+               ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
+               break;
        default:
                ret = -ENOIOCTLCMD;
                break;
-- 
2.25.1

Reply via email to