On Mon, Jul 29, 2013 at 11:48:06AM +0200, Paolo Bonzini wrote:
> On 25/07/2013 12:59, Gleb Natapov wrote:
> > From: Nadav Har'El <[email protected]>
> > 
> > This is the first patch in a series which adds nested EPT support to
> > KVM's nested VMX. Nested EPT means emulating EPT for an L1 guest so
> > that L1 can use EPT when running a nested guest L2. When L1 uses EPT,
> > it allows the L2 guest to set its own cr3 and take its own page faults
> > without either of L0 or L1 getting involved. This often significantly
> > improves L2's performance over the previous two alternatives (shadow
> > page tables over EPT, and shadow page tables over shadow page tables).
> > 
> > This patch adds EPT support to paging_tmpl.h.
> > 
> > paging_tmpl.h contains the code for reading and writing page tables.
> > The code for 32-bit and 64-bit tables is very similar, but not
> > identical, so paging_tmpl.h is #include'd twice in mmu.c, once with
> > PTTYPE=32 and once with PTTYPE=64, and this generates the two sets of
> > similar functions.
> > 
> > There are subtle but important differences between the format of EPT tables
> > and that of ordinary x86 64-bit page tables, so for nested EPT we need a
> > third set of functions to read the guest EPT table and to write the shadow
> > EPT table.
> > 
> > So this patch adds a third PTTYPE, PTTYPE_EPT, which creates functions
> > (prefixed with "EPT") which correctly read and write EPT tables.
> > 
> > Signed-off-by: Nadav Har'El <[email protected]>
> > Signed-off-by: Jun Nakajima <[email protected]>
> > Signed-off-by: Xinhao Xu <[email protected]>
> > Signed-off-by: Yang Zhang <[email protected]>
> > Signed-off-by: Gleb Natapov <[email protected]>
> > ---
> >  arch/x86/kvm/mmu.c         |    5 +++++
> >  arch/x86/kvm/paging_tmpl.h |   43 +++++++++++++++++++++++++++++++++++++++----
> >  2 files changed, 44 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index 4c4274d..b5273c3 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -3494,6 +3494,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp
> >     return mmu->last_pte_bitmap & (1 << index);
> >  }
> >  
> > +#define PTTYPE_EPT 18 /* arbitrary */
> > +#define PTTYPE PTTYPE_EPT
> > +#include "paging_tmpl.h"
> > +#undef PTTYPE
> > +
> >  #define PTTYPE 64
> >  #include "paging_tmpl.h"
> >  #undef PTTYPE
> > diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> > index 7581395..e38b3c0 100644
> > --- a/arch/x86/kvm/paging_tmpl.h
> > +++ b/arch/x86/kvm/paging_tmpl.h
> > @@ -58,6 +58,21 @@
> >     #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
> >     #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
> >     #define CMPXCHG cmpxchg
> > +#elif PTTYPE == PTTYPE_EPT
> > +   #define pt_element_t u64
> > +   #define guest_walker guest_walkerEPT
> > +   #define FNAME(name) ept_##name
> > +   #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
> > +   #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
> > +   #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
> > +   #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
> > +   #define PT_LEVEL_BITS PT64_LEVEL_BITS
> > +   #define PT_GUEST_ACCESSED_MASK 0
> > +   #define PT_GUEST_DIRTY_MASK 0
> > +   #define PT_GUEST_DIRTY_SHIFT 0
> > +   #define PT_GUEST_ACCESSED_SHIFT 0
> > +   #define CMPXCHG cmpxchg64
> > +   #define PT_MAX_FULL_LEVELS 4
> >  #else
> >     #error Invalid PTTYPE value
> >  #endif
> > @@ -90,6 +105,10 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
> >  
> >  static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> >  {
> > +#if PT_GUEST_DIRTY_MASK == 0
> > +   /* dirty bit is not supported, so no need to track it */
> > +   return;
> > +#else
> >     unsigned mask;
> >  
> >     BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> > @@ -99,6 +118,7 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> >     mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
> >             PT_WRITABLE_MASK;
> >     *access &= mask;
> > +#endif
> 
> Please put this #if/#else/#endif in the previous patch.  (See also
> below on leaving out protect_clean_gpte altogether).
> 
Why? This change does not make much sense before EPT is introduced. The
previous patch is just a rename that should be easily verifiable by any
reviewer to be a NOP.

> You probably should also have a
> 
>       BUILD_BUG_ON(PT_GUEST_DIRTY_SHIFT < PT_WRITABLE_SHIFT);
> 
> in the #else branch.
> 
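
(For reference, I read that as roughly the following, with the new check next
to the existing BUILD_BUG_ON in the #else branch -- just a sketch:)

#else
	unsigned mask;

	/* the dirty bit gets folded down onto the writable bit below, so it
	 * must not sit at a lower bit position */
	BUILD_BUG_ON(PT_GUEST_DIRTY_SHIFT < PT_WRITABLE_SHIFT);
	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
	...
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
#endif
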
> >  }
> >  
> >  static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> > @@ -111,7 +131,11 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> >  
> >  static inline int FNAME(is_present_gpte)(unsigned long pte)
> >  {
> > +#if PTTYPE != PTTYPE_EPT
> >     return is_present_gpte(pte);
> > +#else
> > +   return pte & 7;
> > +#endif
> >  }
> >  
> >  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> > @@ -147,7 +171,8 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
> >     if (!FNAME(is_present_gpte)(gpte))
> >             goto no_present;
> >  
> > -   if (!(gpte & PT_GUEST_ACCESSED_MASK))
> > +   /* if accessed bit is not supported prefetch non accessed gpte */
> > +   if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
> 
> Same for this hunk.  Please put it in the previous patch.
> 
> >             goto no_present;
> >  
> >     return false;
> > @@ -160,9 +185,14 @@ no_present:
> >  static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
> >  {
> >     unsigned access;
> > -
> > +#if PTTYPE == PTTYPE_EPT
> > +   BUILD_BUG_ON(ACC_WRITE_MASK != VMX_EPT_WRITABLE_MASK);
> > +   access = (gpte & VMX_EPT_WRITABLE_MASK) | ACC_USER_MASK |
> > +           ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0);
> > +#else
> >     access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> >     access &= ~(gpte >> PT64_NX_SHIFT);
> > +#endif
> >  
> >     return access;
> >  }
> > @@ -212,7 +242,6 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
> >                                 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> >                                 gva_t addr, u32 access)
> >  {
> > -   int ret;
> >     pt_element_t pte;
> >     pt_element_t __user *uninitialized_var(ptep_user);
> >     gfn_t table_gfn;
> > @@ -322,7 +351,9 @@ retry_walk:
> >             accessed_dirty &= pte >>
> >                     (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
> > -   if (unlikely(!accessed_dirty)) {
> > +   if (PT_GUEST_DIRTY_MASK && unlikely(!accessed_dirty)) {
> > +           int ret;
> > +
> >             ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
> 
> I think the whole block of code starting at
> 
>         if (!write_fault)
>                 protect_clean_gpte(&pte_access, pte);
>         else
>                 /*
>                  * On a write fault, fold the dirty bit into accessed_dirty by
>                  * shifting it one place right.
>                  */
>               accessed_dirty &= pte >>
>                       (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
> 
> should be compiled out (in the previous patch) if dirty bits are not in use.
> The "then" branch does nothing in that case, and the "else" branch is dead
> code that makes no sense.
> 
I disagree; the ifdef was there before and it was ugly. protect_clean_gpte
and update_accessed_dirty_bits had to be ifdefed too. The compiler should be
smart enough to get rid of all of this code when PT_GUEST_DIRTY_MASK is 0.
Doing it like that was Xiao's idea and it looks much nicer.
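
I.e. the guard is a compile-time constant per paging_tmpl.h instantiation, so
for EPT the whole block is dead code (simplified sketch of the pattern, not
the exact hunk):

	/* PT_GUEST_DIRTY_MASK is 0 for the EPT instantiation, so the
	 * condition below is constant-false and the compiler drops the
	 * block, including the update_accessed_dirty_bits() call. */
	if (PT_GUEST_DIRTY_MASK && unlikely(!accessed_dirty)) {
		int ret;

		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
							write_fault);
		if (unlikely(ret < 0))
			goto error;
	}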

> Once you do this, you can add a
> 
>       BUILD_BUG_ON(PT_GUEST_DIRTY_SHIFT < PT_GUEST_ACCESSED_SHIFT);
> 
> before the shift.
> 
> Please check if, with these changes, you can avoid defining
> PT_GUEST_{DIRTY,ACCESSED}_SHIFT altogether in the EPT case.
> This is safer because you are sure you left no undefined
> behaviors when a bit is being folded onto another.
You are basically asking me to take the patch back to how it was before I
addressed Xiao's comment, and to add some more ifdefs, because previously not
all places where A/D bits are used were protected by one. IMO this would be a
step backward, especially as the approach in this patch series is a
preparation for A/D support for EPT. When those bits are supported with EPT
they are different from the ones in regular page tables.
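
(E.g., when that happens the EPT instantiation would grow definitions along
these lines -- only a sketch, the exact bit positions come from the SDM:)

	/* EPT accessed/dirty bits live at bits 8/9, not at bits 5/6 as in
	 * ordinary x86 page tables */
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_MASK (1ULL << PT_GUEST_ACCESSED_SHIFT)
	#define PT_GUEST_DIRTY_MASK (1ULL << PT_GUEST_DIRTY_SHIFT)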

> 
> In principle, with these changes you could leave protect_clean_gpte in mmu.c.
Only if I ifdef all other uses of it in the file.

> I'm not sure what is the cleanest thing to do there, so I'll leave that to
> your judgement.
> 
> Paolo
> 
> >             if (unlikely(ret < 0))
> >                     goto error;
> > @@ -359,6 +390,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
> >                                     access);
> >  }
> >  
> > +#if PTTYPE != PTTYPE_EPT
> >  static int FNAME(walk_addr_nested)(struct guest_walker *walker,
> >                                struct kvm_vcpu *vcpu, gva_t addr,
> >                                u32 access)
> > @@ -366,6 +398,7 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
> >     return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
> >                                     addr, access);
> >  }
> > +#endif
> >  
> >  static bool
> >  FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> > @@ -793,6 +826,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
> >     return gpa;
> >  }
> >  
> > +#if PTTYPE != PTTYPE_EPT
> >  static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
> >                                   u32 access,
> >                                   struct x86_exception *exception)
> > @@ -811,6 +845,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
> >  
> >     return gpa;
> >  }
> > +#endif
> >  
> >  /*
> >   * Using the cached information from sp->gfns is safe because:
> > 

--
                        Gleb.