On Tue, Aug 16, 2011 at 02:44:42PM +0800, Xiao Guangrong wrote:
> Fast prefetch spte for the unsync shadow page on invlpg path
> 
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
>  arch/x86/include/asm/kvm_host.h |    4 +---
>  arch/x86/kvm/mmu.c              |   38 +++++++++++++++-----------------------
>  arch/x86/kvm/paging_tmpl.h      |   30 ++++++++++++++++++------------
>  arch/x86/kvm/x86.c              |    4 ++--
>  4 files changed, 36 insertions(+), 40 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 58ea3a7..927ba73 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -460,7 +460,6 @@ struct kvm_arch {
>       unsigned int n_requested_mmu_pages;
>       unsigned int n_max_mmu_pages;
>       unsigned int indirect_shadow_pages;
> -     atomic_t invlpg_counter;
>       struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
>       /*
>        * Hash table of struct kvm_mmu_page.
> @@ -754,8 +753,7 @@ int fx_init(struct kvm_vcpu *vcpu);
>  
>  void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
> -                    const u8 *new, int bytes,
> -                    bool guest_initiated);
> +                    const u8 *new, int bytes);
>  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
>  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
>  void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ed3e778..f6de2fc 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3530,8 +3530,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
>  }
>  
>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
> -                    const u8 *new, int bytes,
> -                    bool guest_initiated)
> +                    const u8 *new, int bytes)
>  {
>       gfn_t gfn = gpa >> PAGE_SHIFT;
>       union kvm_mmu_page_role mask = { .word = 0 };
> @@ -3540,7 +3539,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>       LIST_HEAD(invalid_list);
>       u64 entry, gentry, *spte;
>       unsigned pte_size, page_offset, misaligned, quadrant, offset;
> -     int level, npte, invlpg_counter, r, flooded = 0;
> +     int level, npte, r, flooded = 0;
>       bool remote_flush, local_flush, zap_page;
>  
>       /*
> @@ -3555,19 +3554,16 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  
>       pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
>  
> -     invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
> -
>       /*
>        * Assume that the pte write on a page table of the same type
>        * as the current vcpu paging mode since we update the sptes only
>        * when they have the same mode.
>        */
> -     if ((is_pae(vcpu) && bytes == 4) || !new) {
> +     if (is_pae(vcpu) && bytes == 4) {
>               /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
> -             if (is_pae(vcpu)) {
> -                     gpa &= ~(gpa_t)7;
> -                     bytes = 8;
> -             }
> +             gpa &= ~(gpa_t)7;
> +             bytes = 8;
> +
>               r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
>               if (r)
>                       gentry = 0;
> @@ -3593,22 +3589,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>        */
>       mmu_topup_memory_caches(vcpu);
>       spin_lock(&vcpu->kvm->mmu_lock);
> -     if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
> -             gentry = 0;
>       kvm_mmu_free_some_pages(vcpu);
>       ++vcpu->kvm->stat.mmu_pte_write;
>       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
> -     if (guest_initiated) {
> -             if (gfn == vcpu->arch.last_pt_write_gfn
> -                 && !last_updated_pte_accessed(vcpu)) {
> -                     ++vcpu->arch.last_pt_write_count;
> -                     if (vcpu->arch.last_pt_write_count >= 3)
> -                             flooded = 1;
> -             } else {
> -                     vcpu->arch.last_pt_write_gfn = gfn;
> -                     vcpu->arch.last_pt_write_count = 1;
> -                     vcpu->arch.last_pte_updated = NULL;
> -             }
> +     if (gfn == vcpu->arch.last_pt_write_gfn
> +         && !last_updated_pte_accessed(vcpu)) {
> +             ++vcpu->arch.last_pt_write_count;
> +             if (vcpu->arch.last_pt_write_count >= 3)
> +                     flooded = 1;
> +     } else {
> +             vcpu->arch.last_pt_write_gfn = gfn;
> +             vcpu->arch.last_pt_write_count = 1;
> +             vcpu->arch.last_pte_updated = NULL;
>       }
>  
>       mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 7862c05..bdc2241 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -670,20 +670,27 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  {
>       struct kvm_shadow_walk_iterator iterator;
>       struct kvm_mmu_page *sp;
> -     gpa_t pte_gpa = -1;
>       int level;
>       u64 *sptep;
>  
>       vcpu_clear_mmio_info(vcpu, gva);
>  
> -     spin_lock(&vcpu->kvm->mmu_lock);
> +     /*
> +      * No need to check return value here, rmap_can_add() can
> +      * help us to skip pte prefetch later.
> +      */
> +     mmu_topup_memory_caches(vcpu);
>  
> +     spin_lock(&vcpu->kvm->mmu_lock);
>       for_each_shadow_entry(vcpu, gva, iterator) {
>               level = iterator.level;
>               sptep = iterator.sptep;
>  
>               sp = page_header(__pa(sptep));
>               if (is_last_spte(*sptep, level)) {
> +                     pt_element_t gpte;
> +                     gpa_t pte_gpa;
> +
>                       if (!sp->unsync)
>                               break;
>  
> @@ -692,22 +699,21 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  
>                       if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
>                               kvm_flush_remote_tlbs(vcpu->kvm);
> +
> +                     if (rmap_can_add(vcpu))
> +                             break;

Shouldn't this condition be inverted, i.e.:

if (!rmap_can_add(vcpu))
        break;

? As written, the prefetch is skipped exactly when the rmap cache
still has room for a new entry.
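
For context, I'm reading rmap_can_add() as a check that the rmap
cache topped up before mmu_lock was taken still has free objects,
something like this sketch (my reading, not code from this patch;
the cache and field names are assumptions):

	static bool rmap_can_add(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu_memory_cache *cache;

		/* Filled by mmu_topup_memory_caches() before mmu_lock. */
		cache = &vcpu->arch.mmu_pte_list_desc_cache;
		return cache->nobjs > 0;
	}

If that reading is right, the prefetch has to be skipped when the
cache is exhausted, that is, when rmap_can_add() returns false.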

> +
> +                     if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
> +                                               sizeof(pt_element_t)))
> +                             break;
> +
> +                     FNAME(update_pte)(vcpu, sp, sptep, &gpte);
>               }
>  
>               if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
>                       break;
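
One note on the mmu_topup_memory_caches() hunk above, as I read it:
the topup can sleep, so it has to run before
spin_lock(&vcpu->kvm->mmu_lock), and ignoring its return value is
safe precisely because rmap_can_add() later skips the prefetch when
the cache is empty. Roughly (a sketch of my understanding, not code
from the patch):

	/* May sleep (GFP_KERNEL allocations), so before mmu_lock. */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	/*
	 * No allocations are possible under the lock; if the topup
	 * failed, the prefetch is skipped via rmap_can_add(), which
	 * costs an optimization but never correctness.
	 */
	spin_unlock(&vcpu->kvm->mmu_lock);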
