On 25/07/2013 12:59, Gleb Natapov wrote:
> From: Nadav Har'El <[email protected]>
>
> This is the first patch in a series which adds nested EPT support to KVM's
> nested VMX. Nested EPT means emulating EPT for an L1 guest so that L1 can use
> EPT when running a nested guest L2. When L1 uses EPT, it allows the L2 guest
> to set its own cr3 and take its own page faults without either of L0 or L1
> getting involved. This often significantly improves L2's performance over the
> previous two alternatives (shadow page tables over EPT, and shadow page
> tables over shadow page tables).
>
> This patch adds EPT support to paging_tmpl.h.
>
> paging_tmpl.h contains the code for reading and writing page tables. The code
> for 32-bit and 64-bit tables is very similar, but not identical, so
> paging_tmpl.h is #include'd twice in mmu.c, once with PTTYPE=32 and once
> with PTTYPE=64, and this generates the two sets of similar functions.
>
> There are subtle but important differences between the format of EPT tables
> and that of ordinary x86 64-bit page tables, so for nested EPT we need a
> third set of functions to read the guest EPT table and to write the shadow
> EPT table.
>
> So this patch adds a third PTTYPE, PTTYPE_EPT, which creates functions (prefixed
> with "EPT") which correctly read and write EPT tables.
>
> Signed-off-by: Nadav Har'El <[email protected]>
> Signed-off-by: Jun Nakajima <[email protected]>
> Signed-off-by: Xinhao Xu <[email protected]>
> Signed-off-by: Yang Zhang <[email protected]>
> Signed-off-by: Gleb Natapov <[email protected]>
> ---
> arch/x86/kvm/mmu.c | 5 +++++
> arch/x86/kvm/paging_tmpl.h | 43 +++++++++++++++++++++++++++++++++++++++----
> 2 files changed, 44 insertions(+), 4 deletions(-)
Ok, let's rewind and retry.
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 4c4274d..b5273c3 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3494,6 +3494,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp
> return mmu->last_pte_bitmap & (1 << index);
> }
>
> +#define PTTYPE_EPT 18 /* arbitrary */
> +#define PTTYPE PTTYPE_EPT
> +#include "paging_tmpl.h"
> +#undef PTTYPE
> +
> #define PTTYPE 64
> #include "paging_tmpl.h"
> #undef PTTYPE
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 7581395..e38b3c0 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -58,6 +58,21 @@
> #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
> #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
> #define CMPXCHG cmpxchg
> +#elif PTTYPE == PTTYPE_EPT
> + #define pt_element_t u64
> + #define guest_walker guest_walkerEPT
> + #define FNAME(name) ept_##name
> + #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
> + #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
> + #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
> + #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
> + #define PT_LEVEL_BITS PT64_LEVEL_BITS
> + #define PT_GUEST_ACCESSED_MASK 0
> + #define PT_GUEST_DIRTY_MASK 0
> + #define PT_GUEST_DIRTY_SHIFT 0
> + #define PT_GUEST_ACCESSED_SHIFT 0
> + #define CMPXCHG cmpxchg64
> + #define PT_MAX_FULL_LEVELS 4
> #else
> #error Invalid PTTYPE value
> #endif
Please add a
BUILD_BUG_ON(!!PT_GUEST_ACCESSED_MASK != !!PT_GUEST_DIRTY_MASK);
#if PT_GUEST_ACCESSED_MASK
BUILD_BUG_ON(PT_GUEST_DIRTY_SHIFT <= PT_GUEST_ACCESSED_SHIFT);
BUILD_BUG_ON(PT_GUEST_DIRTY_SHIFT <= PT_WRITABLE_SHIFT);
BUILD_BUG_ON(PT_GUEST_DIRTY_MASK != (1 << PT_GUEST_DIRTY_SHIFT));
BUILD_BUG_ON(PT_GUEST_ACCESSED_MASK != (1 << PT_GUEST_ACCESSED_SHIFT));
#endif
here.
> @@ -90,6 +105,10 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
>
> static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> {
> +#if PT_GUEST_DIRTY_MASK == 0
> + /* dirty bit is not supported, so no need to track it */
> + return;
> +#else
Here you can use a regular "if" instead of the preprocessor if. Also
please move this and all other PT_GUEST_*-related changes to a separate
patch.
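
For the regular "if", something like this at the top of the function
would do (just a sketch; the compiler constant-folds the mask and drops
the rest of the body for the EPT instantiation):

	/* dirty bit is not supported, so no need to track it */
	if (!PT_GUEST_DIRTY_MASK)
		return;
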
> unsigned mask;
>
> BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> @@ -99,6 +118,7 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
> PT_WRITABLE_MASK;
> *access &= mask;
> +#endif
> }
>
> static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> @@ -111,7 +131,11 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
>
> static inline int FNAME(is_present_gpte)(unsigned long pte)
> {
> +#if PTTYPE != PTTYPE_EPT
> return is_present_gpte(pte);
> +#else
> + return pte & 7;
> +#endif
> }
>
> static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> @@ -147,7 +171,8 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
> if (!FNAME(is_present_gpte)(gpte))
> goto no_present;
>
> - if (!(gpte & PT_GUEST_ACCESSED_MASK))
> + /* if accessed bit is not supported prefetch non accessed gpte */
> + if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
> goto no_present;
>
> return false;
> @@ -160,9 +185,14 @@ no_present:
> static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
> {
> unsigned access;
> -
> +#if PTTYPE == PTTYPE_EPT
> + BUILD_BUG_ON(ACC_WRITE_MASK != VMX_EPT_WRITABLE_MASK);
> + access = (gpte & VMX_EPT_WRITABLE_MASK) | ACC_USER_MASK |
> + ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0);
You can use a ?: here even for writable. The compiler will simplify (a
& b ? b : 0) to just "a & b" for single-bit b.
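For example (a sketch; the BUILD_BUG_ON above already guarantees
ACC_WRITE_MASK == VMX_EPT_WRITABLE_MASK, so the first ?: folds back to
a plain AND):

	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		 ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		 ACC_USER_MASK;
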
> +#else
> access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> access &= ~(gpte >> PT64_NX_SHIFT);
> +#endif
>
> return access;
> }
> @@ -212,7 +242,6 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
> struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> gva_t addr, u32 access)
> {
> - int ret;
This change is not needed anymore since you removed the #ifdef.
> pt_element_t pte;
> pt_element_t __user *uninitialized_var(ptep_user);
> gfn_t table_gfn;
> @@ -322,7 +351,9 @@ retry_walk:
> accessed_dirty &= pte >>
> (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
This shift is one of two things that bugged me. I dislike including
"wrong" code just because it is dead. Perhaps you can #define the
shifts to 8 and 9 already now, even if the masks stay 0?
Then when you implement nEPT A/D bits you only have to flip the masks
from 0 to (1 << PT_GUEST_*_SHIFT).
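Something along these lines in the PTTYPE_EPT block (sketch; 8 and 9
are the EPT accessed/dirty bit positions):

	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_MASK 0
	#define PT_GUEST_DIRTY_MASK 0
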
>
> - if (unlikely(!accessed_dirty)) {
> + if (PT_GUEST_DIRTY_MASK && unlikely(!accessed_dirty)) {
If we want to put the dead-code elimination burden on the compiler,
let's go all the way. So, instead of this "if", please make
update_accessed_dirty_bits return 0 at once when PT_GUEST_DIRTY_MASK is
0; protect_clean_gpte can then use the same early-return pattern.
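E.g. (sketch; the call site then keeps its plain
unlikely(!accessed_dirty) check):

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	/* A/D bits not supported by this page table type: nothing to do */
	if (!PT_GUEST_DIRTY_MASK)
		return 0;
	...
}
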
Paolo
> + int ret;
> +
> ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
> write_fault);
> if (unlikely(ret < 0))
> goto error;
> @@ -359,6 +390,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
> access);
> }
>
> +#if PTTYPE != PTTYPE_EPT
> static int FNAME(walk_addr_nested)(struct guest_walker *walker,
> struct kvm_vcpu *vcpu, gva_t addr,
> u32 access)
> @@ -366,6 +398,7 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
> return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
> addr, access);
> }
> +#endif
>
> static bool
> FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> @@ -793,6 +826,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
> return gpa;
> }
>
> +#if PTTYPE != PTTYPE_EPT
> static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
> u32 access,
> struct x86_exception *exception)
> @@ -811,6 +845,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
>
> return gpa;
> }
> +#endif
>
> /*
> * Using the cached information from sp->gfns is safe because:
>