On Tue, Aug 25, 2020 at 06:44:12PM +0200, Borislav Petkov wrote:
> On Thu, Jul 16, 2020 at 04:52:50PM +0300, Jarkko Sakkinen wrote:
> 
> Just minor things below - I'm not even going to pretend I fully
> understand what's going on but FWICT, it looks non-threateningly ok to
> me.
> 
> > diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
> > new file mode 100644
> > index 000000000000..b52520407f5b
> > --- /dev/null
> > +++ b/arch/x86/kernel/cpu/sgx/driver.c
> > @@ -0,0 +1,177 @@
> > +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
> > +// Copyright(c) 2016-18 Intel Corporation.
> > +
> > +#include <linux/acpi.h>
> > +#include <linux/miscdevice.h>
> > +#include <linux/mman.h>
> > +#include <linux/security.h>
> > +#include <linux/suspend.h>
> > +#include <asm/traps.h>
> > +#include "driver.h"
> > +#include "encl.h"
> > +
> > +MODULE_DESCRIPTION("Intel SGX Enclave Driver");
> > +MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakki...@linux.intel.com>");
> > +MODULE_LICENSE("Dual BSD/GPL");
> 
> That boilerplate stuff usually goes to the end of the file.

These are all cruft from the time when we still had a kernel module,
i.e. I'll just remove them.
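
(For reference, the end-of-file convention Boris refers to would just be
these same three lines placed after the last function, roughly as below;
moot here since they're being dropped entirely:)

	MODULE_DESCRIPTION("Intel SGX Enclave Driver");
	MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakki...@linux.intel.com>");
	MODULE_LICENSE("Dual BSD/GPL");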

> 
> ...
> 
> > +static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
> > +                                           unsigned long addr)
> > +{
> > +   struct sgx_encl_page *entry;
> > +   unsigned int flags;
> > +
> > +   /* If process was forked, VMA is still there but vm_private_data is set
> > +    * to NULL.
> > +    */
> > +   if (!encl)
> > +           return ERR_PTR(-EFAULT);
> > +
> > +   flags = atomic_read(&encl->flags);
> > +
> 
> ^ Superfluous newline.
> 
> > +   if ((flags & SGX_ENCL_DEAD) || !(flags & SGX_ENCL_INITIALIZED))
> > +           return ERR_PTR(-EFAULT);
> > +
> > +   entry = xa_load(&encl->page_array, PFN_DOWN(addr));
> > +   if (!entry)
> > +           return ERR_PTR(-EFAULT);
> > +
> > +   /* Page is already resident in the EPC. */
> > +   if (entry->epc_page)
> > +           return entry;
> > +
> > +   return ERR_PTR(-EFAULT);
> > +}
> > +
> > +static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
> > +                                struct mm_struct *mm)
> > +{
> > +   struct sgx_encl_mm *encl_mm =
> > +           container_of(mn, struct sgx_encl_mm, mmu_notifier);
> 
> Just let it stick out.
> 
> > +   struct sgx_encl_mm *tmp = NULL;
> > +
> > +   /*
> > +    * The enclave itself can remove encl_mm.  Note, objects can't be moved
> > +    * off an RCU protected list, but deletion is ok.
> > +    */
> > +   spin_lock(&encl_mm->encl->mm_lock);
> > +   list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
> > +           if (tmp == encl_mm) {
> > +                   list_del_rcu(&encl_mm->list);
> > +                   break;
> > +           }
> > +   }
> > +   spin_unlock(&encl_mm->encl->mm_lock);
> > +
> > +   if (tmp == encl_mm) {
> > +           synchronize_srcu(&encl_mm->encl->srcu);
> > +           mmu_notifier_put(mn);
> > +   }
> > +}
> > +
> > +static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
> > +{
> > +   struct sgx_encl_mm *encl_mm =
> > +           container_of(mn, struct sgx_encl_mm, mmu_notifier);
> 
> Ditto.
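
Sure, I'll let both container_of() assignments stick out past 80
columns, i.e. roughly:

	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
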
> 
> ...
> 
> > +/**
> > + * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
> > + * @encl:          an enclave
> > + * @start:         lower bound of the address range, inclusive
> > + * @end:           upper bound of the address range, exclusive
> > + * @vm_prot_bits:  requested protections of the address range
> > + *
> > + * Iterate through the enclave pages contained within [@start, @end) to verify
> > + * the permissions requested by @vm_prot_bits do not exceed that of any enclave
> > + * page to be mapped.
> > + *
> > + * Return:
> > + *   0 on success,
> > + *   -EACCES if VMA permissions exceed enclave page permissions
> > + */
> > +int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
> > +                unsigned long end, unsigned long vm_flags)
> > +{
> > +   unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> > +   unsigned long idx_start = PFN_DOWN(start);
> > +   unsigned long idx_end = PFN_DOWN(end - 1);
> > +   struct sgx_encl_page *page;
> > +   XA_STATE(xas, &encl->page_array, idx_start);
> > +
> > +   /*
> > +    * Disallow RIE tasks as their VMA permissions might conflict with the
> 
> "RIE", hmm what is that?
> 
> /me looks at the test
> 
> Aaah, READ_IMPLIES_EXEC. Is "RIE" some widely accepted acronym I'm not
> aware of?

I think it was used in some email discussions related to this piece of
code but I'm happy to write it as READ_IMPLIES_EXEC :-)
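
I.e. the comment would become something like:

	/*
	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
	 * conflict with the enclave page permissions.
	 */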

> 
> > +    * enclave page permissions.
> > +    */
> > +   if (!!(current->personality & READ_IMPLIES_EXEC))
> 
> The "!!" is not really needed - you're in boolean context.
> 
> ...
> 
> -- 
> Regards/Gruss,
>     Boris.
> 
> https://people.kernel.org/tglx/notes-about-netiquette

Thanks for the remarks.

/Jarkko
