> -----Original Message-----
> From: kvm-ow...@vger.kernel.org [mailto:kvm-ow...@vger.kernel.org] On
> Behalf Of Paolo Bonzini
> Sent: Monday, March 31, 2014 9:31 PM
> To: Wu, Feng; g...@redhat.com; h...@zytor.com; kvm@vger.kernel.org
> Subject: Re: [PATCH v3 2/4] KVM: Add SMAP support when setting CR4
> 
> Just a few comments...
> 
> > -static void update_permission_bitmask(struct kvm_vcpu *vcpu,
> > +void update_permission_bitmask(struct kvm_vcpu *vcpu,
> >             struct kvm_mmu *mmu, bool ept)
> >  {
> >     unsigned bit, byte, pfec;
> >     u8 map;
> > -   bool fault, x, w, u, wf, uf, ff, smep;
> > +   bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, smep, smap = 0;
> >
> >     smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
> 
> Can you make an additional patch to rename this to cr4_smep?

Sure! I noticed your comment about this in the previous email; I was already
preparing a patch for it and will send it out today!
> 
> > +   cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
> >     for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
> >             pfec = byte << 1;
> >             map = 0;
> >             wf = pfec & PFERR_WRITE_MASK;
> >             uf = pfec & PFERR_USER_MASK;
> >             ff = pfec & PFERR_FETCH_MASK;
> > +           /*
> > +            * PFERR_RSVD_MASK bit is used to detect SMAP violation.
> > +            * We will check it in permission_fault(), this bit is
> > +            * set in pfec for normal fault, while it is cleared for
> > +            * SMAP violations.
> > +            */
> 
> "This bit is set in PFEC if we the access is _not_ subject to SMAP
> restrictions, and cleared otherwise.  The bit is only meaningful if
> the SMAP bit is set in CR4."
> 
> > +           smapf = !(pfec & PFERR_RSVD_MASK);
> >             for (bit = 0; bit < 8; ++bit) {
> >                     x = bit & ACC_EXEC_MASK;
> >                     w = bit & ACC_WRITE_MASK;
> > @@ -3627,11 +3635,32 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
> >                             w |= !is_write_protection(vcpu) && !uf;
> >                             /* Disallow supervisor fetches of user code if cr4.smep */
> >                             x &= !(smep && u && !uf);
> > +
> > +                           /*
> > +            * SMAP: kernel-mode data accesses from user-mode
> > +                            * mappings should fault. A fault is considered
> > +                            * as a SMAP violation if all of the following
> > +            * conditions are true:
> > +                            *   - X86_CR4_SMAP is set in CR4
> > +            *   - A user page is accessed
> > +                            *   - Page fault in kernel mode
> > +                            *   - !(CPL<3 && X86_EFLAGS_AC is set)
> 
> - if CPL < 3, EFLAGS.AC is clear

Should it be "if CPL = 3 or EFLAGS.AC is clear"? That is just the De Morgan
expansion of the existing !(CPL < 3 && X86_EFLAGS_AC is set) condition.

> 
> > +                            *   Here, we cover the first three conditions,
> > +                            *   The CPL and X86_EFLAGS_AC are in smapf, which
> > +                            *   permission_fault() computes dynamically.
> 
> The fourth is computed dynamically in permission_fault() and is in SMAPF.
> 
> > +                            *   Also, SMAP does not affect instruction
> > +                            *   fetches, add the !ff check here to make it
> > +                            *   clearer.
> > +                            */
> > +                           smap = cr4_smap && u && !uf && !ff;
> >                     } else
> >                             /* Not really needed: no U/S accesses on ept  */
> >                             u = 1;
> >
> > -                   fault = (ff && !x) || (uf && !u) || (wf && !w);
> > +                   fault = (ff && !x) || (uf && !u) || (wf && !w) ||
> > +                           (smapf && smap);
> >                     map |= fault << bit;
> >             }
> >             mmu->permissions[byte] = map;
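
By the way, just to double-check my own understanding of the comments above,
here is a small userspace sketch of the per-entry decision. It is only an
illustration, not the kernel code: it models a single readable/writable/
executable user page, skips the write-protection handling, and the
entry_faults()/main() harness is purely hypothetical.

#include <assert.h>
#include <stdbool.h>

#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK  (1U << 2)
#define PFERR_RSVD_MASK  (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)

/* Would this pfec fault on a fully accessible user page? */
static bool entry_faults(unsigned pfec, bool cr4_smap)
{
	bool wf = pfec & PFERR_WRITE_MASK;
	bool uf = pfec & PFERR_USER_MASK;
	bool ff = pfec & PFERR_FETCH_MASK;
	/* RSVD clear => this entry is for accesses subject to SMAP checks. */
	bool smapf = !(pfec & PFERR_RSVD_MASK);
	bool x = true, w = true, u = true;	/* simplified ACC_ALL user page */
	/* SMAP only matters for supervisor data accesses to user pages. */
	bool smap = cr4_smap && u && !uf && !ff;

	return (ff && !x) || (uf && !u) || (wf && !w) || (smapf && smap);
}

int main(void)
{
	/* Supervisor data read, subject to SMAP: the entry records a fault. */
	assert(entry_faults(0, true) == true);
	/* Same access in the "SMAP overridden" half of the table: no fault. */
	assert(entry_faults(PFERR_RSVD_MASK, true) == false);
	/* With CR4.SMAP clear, nothing changes for this page. */
	assert(entry_faults(0, false) == false);
	return 0;
}

So with CR4.SMAP set we really do get two variants of each entry, and
permission_fault() below only has to pick the right one.
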
> > diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> > index 2926152..822190f 100644
> > --- a/arch/x86/kvm/mmu.h
> > +++ b/arch/x86/kvm/mmu.h
> > @@ -44,11 +44,17 @@
> >  #define PT_DIRECTORY_LEVEL 2
> >  #define PT_PAGE_TABLE_LEVEL 1
> >
> > -#define PFERR_PRESENT_MASK (1U << 0)
> > -#define PFERR_WRITE_MASK (1U << 1)
> > -#define PFERR_USER_MASK (1U << 2)
> > -#define PFERR_RSVD_MASK (1U << 3)
> > -#define PFERR_FETCH_MASK (1U << 4)
> > +#define PFERR_PRESENT_BIT 0
> > +#define PFERR_WRITE_BIT 1
> > +#define PFERR_USER_BIT 2
> > +#define PFERR_RSVD_BIT 3
> > +#define PFERR_FETCH_BIT 4
> > +
> > +#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
> > +#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
> > +#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
> > +#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
> > +#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
> >
> >  int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
> >  void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
> > @@ -73,6 +79,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
> >  void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
> >  void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
> >             bool execonly);
> > +void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> > +           bool ept);
> >
> >  static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
> >  {
> > @@ -110,10 +118,30 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
> >   * Will a fault with a given page-fault error code (pfec) cause a permission
> >   * fault with the given access (in ACC_* format)?
> >   */
> > -static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
> > -                               unsigned pfec)
> > +static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> > +                               unsigned pte_access, unsigned pfec)
> >  {
> > -   return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
> > +   int cpl = kvm_x86_ops->get_cpl(vcpu);
> > +   unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
> > +
> > +   /*
> > +    * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
> > +    *
> > +    * If CPL = 3, SMAP applies to all supervisor-mode data accesses
> > +    * (these are implicit supervisor accesses) regardless of the value
> > +    * of EFLAGS.AC.
> > +    *
> > +    * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
> > +    * the result in X86_EFLAGS_AC. We then insert it in place of
> > +    * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
> > +    * but it will be one in index if SMAP checks are being overridden.
> > +    * It is important to keep this branchless.
> > +    */
> > +   unsigned long smap = (cpl-3) & (rflags & X86_EFLAGS_AC);
> 
> Spaces around minus.
> 
> > +   int index = (pfec >> 1) +
> > +               (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
> > +
> > +   return (mmu->permissions[index] >> pte_access) & 1;
> >  }
> >
> >  void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
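
Similarly, here is a minimal standalone sketch of the branchless index
computation above, again an illustration only: the smap_index() helper and
the main() harness are hypothetical, and the constants simply mirror
EFLAGS.AC being bit 18 and PFERR_RSVD being bit 3.

#include <assert.h>

#define X86_EFLAGS_AC_BIT	18
#define X86_EFLAGS_AC		(1UL << X86_EFLAGS_AC_BIT)
#define PFERR_RSVD_BIT		3

static unsigned smap_index(int cpl, unsigned long rflags, unsigned pfec)
{
	/* (cpl - 3) is negative for cpl < 3, so bit 18 is set exactly then. */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

	/* Move bit 18 down to where the RSVD bit sits after pfec >> 1. */
	return (pfec >> 1) +
	       (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	unsigned pfec = 0;	/* e.g. a supervisor-mode data read */

	/* CPL < 3 with EFLAGS.AC set: SMAP overridden, index gains bit 2. */
	assert(smap_index(0, X86_EFLAGS_AC, pfec) == (pfec >> 1) + 4);
	/* CPL = 3, or EFLAGS.AC clear: SMAP applies, plain pfec >> 1 index. */
	assert(smap_index(3, X86_EFLAGS_AC, pfec) == (pfec >> 1));
	assert(smap_index(0, 0, pfec) == (pfec >> 1));
	return 0;
}

That extra index bit lands exactly where the PFERR_RSVD_MASK-derived bit sits
in update_permission_bitmask(), so the lookup selects the "SMAP overridden"
half of the table while staying a single, branchless table access.
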
> > diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> > index cba218a..4107765 100644
> > --- a/arch/x86/kvm/paging_tmpl.h
> > +++ b/arch/x86/kvm/paging_tmpl.h
> > @@ -353,7 +353,7 @@ retry_walk:
> >             walker->ptes[walker->level - 1] = pte;
> >     } while (!is_last_gpte(mmu, walker->level, pte));
> >
> > -   if (unlikely(permission_fault(mmu, pte_access, access))) {
> > +   if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
> >             errcode |= PFERR_PRESENT_MASK;
> >             goto error;
> >     }
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 2b85784..5869c6d 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -646,6 +649,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> >     if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
> >             return 1;
> >
> > +   if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
> > +           return 1;
> > +
> >     if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
> >             return 1;
> >
> > @@ -674,6 +677,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> >         (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
> >             kvm_mmu_reset_context(vcpu);
> >
> > +   if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
> > +           update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
> > +
> >     if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
> >             kvm_update_cpuid(vcpu);
> >
> > @@ -4108,7 +4114,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
> >             | (write ? PFERR_WRITE_MASK : 0);
> >
> >     if (vcpu_match_mmio_gva(vcpu, gva)
> > -       && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
> > +       && !permission_fault(vcpu, vcpu->arch.walk_mmu,
> > +                            vcpu->arch.access, access)) {
> >             *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
> >                                     (gva & (PAGE_SIZE - 1));
> >             trace_vcpu_match_mmio(gva, *gpa, write, false);
> >
> 
> Thanks!
> 
> Paolo

Thanks,
Feng
