On 14/02/2016 12:31, Xiao Guangrong wrote:
> Split rmap_write_protect() and introduce a function that abstracts the
> write protection based on the slot
> 
> This function will be used in a later patch
> 
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
>  arch/x86/kvm/mmu.c | 16 +++++++++++-----
>  arch/x86/kvm/mmu.h |  2 ++
>  2 files changed, 13 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index e1bb66c..edad3c7 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1336,23 +1336,29 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
>               kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
>  }
>  
> -static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
> +bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
> +                                 struct kvm_memory_slot *slot, u64 gfn)
>  {
> -     struct kvm_memory_slot *slot;
>       struct kvm_rmap_head *rmap_head;
>       int i;
>       bool write_protected = false;
>  
> -     slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
> -
>       for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
>               rmap_head = __gfn_to_rmap(gfn, i, slot);
> -             write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true);
> +             write_protected |= __rmap_write_protect(kvm, rmap_head, true);
>       }
>  
>       return write_protected;
>  }
>  
> +static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
> +{
> +     struct kvm_memory_slot *slot;
> +
> +     slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
> +     return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
> +}
> +
>  static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
>  {
>       u64 *sptep;
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index de92bed..58fe98a 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -177,4 +177,6 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
>  
>  void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
>  void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
> +bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
> +                                 struct kvm_memory_slot *slot, u64 gfn);
>  #endif
> 

Reviewed-by: Paolo Bonzini <[email protected]>
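
For anyone following the series: below is a minimal, hypothetical sketch of how a later
patch might call the new slot-based helper once a memslot is already in hand (for
example while walking memslots rather than going through a vcpu). The wrapper name
and the lock/flush pattern are illustrative assumptions, not code from this series:

/*
 * Illustrative only: write-protect a single gfn in a known memslot.
 * kvm_mmu_slot_gfn_write_protect() must be called under mmu_lock; if it
 * reports that sptes were write-protected, remote TLBs need flushing.
 */
static bool example_write_protect_gfn(struct kvm *kvm,
				      struct kvm_memory_slot *slot, u64 gfn)
{
	bool write_protected;

	spin_lock(&kvm->mmu_lock);
	write_protected = kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn);
	spin_unlock(&kvm->mmu_lock);

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);

	return write_protected;
}

The point of the split is visible here: the caller no longer needs a vcpu just to
resolve the memslot, since the slot is passed in directly.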
