On 07/25/2013 06:59 PM, Gleb Natapov wrote:
> From: Nadav Har'El <[email protected]>
>
> In preparation, we just move gpte_access(), prefetch_invalid_gpte(),
> is_rsvd_bits_set(), protect_clean_gpte() and is_dirty_gpte() from mmu.c
> to paging_tmpl.h.
>
> Signed-off-by: Nadav Har'El <[email protected]>
> Signed-off-by: Jun Nakajima <[email protected]>
> Signed-off-by: Xinhao Xu <[email protected]>
> Signed-off-by: Yang Zhang <[email protected]>
> Signed-off-by: Gleb Natapov <[email protected]>
> ---
> arch/x86/kvm/mmu.c | 55 ------------------------------
> arch/x86/kvm/paging_tmpl.h | 80 +++++++++++++++++++++++++++++++++++++-------
> 2 files changed, 68 insertions(+), 67 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 3a9493a..4c4274d 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
> return pte & PT_PAGE_SIZE_MASK;
> }
>
> -static int is_dirty_gpte(unsigned long pte)
> -{
> - return pte & PT_DIRTY_MASK;
> -}
> -
> static int is_rmap_spte(u64 pte)
> {
> return is_shadow_present_pte(pte);
> @@ -2574,14 +2569,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
> mmu_free_roots(vcpu);
> }
>
> -static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
> -{
> - int bit7;
> -
> - bit7 = (gpte >> 7) & 1;
> - return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
> -}
> -
> static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
> bool no_dirty_log)
> {
> @@ -2594,26 +2581,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
> return gfn_to_pfn_memslot_atomic(slot, gfn);
> }
>
> -static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
> - struct kvm_mmu_page *sp, u64 *spte,
> - u64 gpte)
> -{
> - if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> - goto no_present;
> -
> - if (!is_present_gpte(gpte))
> - goto no_present;
> -
> - if (!(gpte & PT_ACCESSED_MASK))
> - goto no_present;
> -
> - return false;
> -
> -no_present:
> - drop_spte(vcpu->kvm, spte);
> - return true;
> -}
> -
> static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
> struct kvm_mmu_page *sp,
> u64 *start, u64 *end)
> @@ -3501,18 +3468,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
> nonpaging_free(vcpu);
> }
>
> -static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
> -{
> - unsigned mask;
> -
> - BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> -
> - mask = (unsigned)~ACC_WRITE_MASK;
> - /* Allow write access to dirty gptes */
> - mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
> - *access &= mask;
> -}
> -
> static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
> unsigned access, int *nr_present)
> {
> @@ -3530,16 +3485,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
> return false;
> }
>
> -static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
> -{
> - unsigned access;
> -
> - access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> - access &= ~(gpte >> PT64_NX_SHIFT);
> -
> - return access;
> -}
> -
> static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level,
> unsigned gpte)
> {
> unsigned index;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 7769699..fb26ca9 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -80,6 +80,31 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
> return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
> }
>
> +static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
> +{
> + unsigned mask;
> +
> + BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
> +
> + mask = (unsigned)~ACC_WRITE_MASK;
> + /* Allow write access to dirty gptes */
> + mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
> + *access &= mask;
> +}
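For reference, the mask trick above works because of the x86 PTE layout: the
writable bit is bit 1 (PT_WRITABLE_SHIFT) and the dirty bit is bit 6
(PT_DIRTY_SHIFT), so shifting the gpte right by 5 moves D onto W. A minimal
standalone sketch of the same computation (constants written out here for
illustration; they follow the x86 PTE layout rather than being copied from the
kernel headers):

        #include <assert.h>

        #define PT_WRITABLE_SHIFT 1                 /* x86 PTE bit 1: R/W   */
        #define PT_DIRTY_SHIFT    6                 /* x86 PTE bit 6: Dirty */
        #define PT_WRITABLE_MASK  (1u << PT_WRITABLE_SHIFT)
        #define PT_DIRTY_MASK     (1u << PT_DIRTY_SHIFT)
        #define ACC_WRITE_MASK    PT_WRITABLE_MASK

        /* Strip write access unless the gpte's dirty bit is set. */
        static unsigned clean_gpte_access(unsigned access, unsigned gpte)
        {
                unsigned mask = (unsigned)~ACC_WRITE_MASK;

                /* >> 5 moves bit 6 (D) into bit 1 (W) */
                mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
                        PT_WRITABLE_MASK;
                return access & mask;
        }

        int main(void)
        {
                /* a dirty gpte keeps write access ... */
                assert(clean_gpte_access(ACC_WRITE_MASK, PT_DIRTY_MASK) ==
                       ACC_WRITE_MASK);
                /* ... a clean one loses it */
                assert(clean_gpte_access(ACC_WRITE_MASK, 0) == 0);
                return 0;
        }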
> +
> +static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> +{
> + int bit7;
> +
> + bit7 = (gpte >> 7) & 1;
> + return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
> +}
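(For readers unfamiliar with rsvd_bits_mask: bit 7 of a gpte is the PS
page-size bit at the directory levels, and the reserved-bit layout differs
between large and 4K mappings, which is why the mask table keeps two entries
per level indexed by that bit.)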
> +
> +static inline int FNAME(is_present_gpte)(unsigned long pte)
> +{
> + return is_present_gpte(pte);
> +}
This is adding a new function, not just moving code, and the change log fails
to mention that. :)
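For context, FNAME() is the name-mangling macro paging_tmpl.h uses so that the
same source can be compiled once per guest paging mode; roughly (abridged from
the top of paging_tmpl.h):

        #if PTTYPE == 64
                #define pt_element_t u64
                #define FNAME(name) paging##64_##name
        #elif PTTYPE == 32
                #define pt_element_t u32
                #define FNAME(name) paging##32_##name
        #endif

So FNAME(is_present_gpte) expands to paging64_is_present_gpte() or
paging32_is_present_gpte() depending on the inclusion site, which is why the
wrapper above is indeed a new symbol rather than a pure move.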
> +
> static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> pt_element_t __user *ptep_user, unsigned index,
> pt_element_t orig_pte, pt_element_t new_pte)
> @@ -103,6 +128,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> return (ret != orig_pte);
> }
>
> +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
> + struct kvm_mmu_page *sp, u64 *spte,
> + u64 gpte)
> +{
> + if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> + goto no_present;
> +
> + if (!FNAME(is_present_gpte)(gpte))
> + goto no_present;
> +
> + if (!(gpte & PT_ACCESSED_MASK))
> + goto no_present;
> +
> + return false;
> +
> +no_present:
> + drop_spte(vcpu->kvm, spte);
> + return true;
> +}
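(The PT_ACCESSED_MASK check above presumably exists because the prefetch path
never writes the guest A bit back, so installing a spte for a not-yet-accessed
gpte would leave the guest's accessed-bit tracking stale.)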
> +
> +static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
> +{
> + unsigned access;
> +
> + access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> + access &= ~(gpte >> PT64_NX_SHIFT);
> +
> + return access;
> +}
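For reference, this computation relies on the ACC_* flags lining up with the
low PTE permission bits (ACC_EXEC_MASK is bit 0, ACC_WRITE_MASK equals
PT_WRITABLE_MASK, ACC_USER_MASK equals PT_USER_MASK) and on NX being bit 63,
so gpte >> PT64_NX_SHIFT lands the NX bit exactly on the exec bit. A minimal
standalone sketch (constants written out for illustration, not copied from the
kernel headers):

        #include <assert.h>
        #include <stdint.h>

        #define PT_WRITABLE_MASK (1u << 1)          /* x86 PTE bit 1     */
        #define PT_USER_MASK     (1u << 2)          /* x86 PTE bit 2     */
        #define PT64_NX_SHIFT    63                 /* x86-64 PTE bit 63 */

        #define ACC_EXEC_MASK    1u
        #define ACC_WRITE_MASK   PT_WRITABLE_MASK

        /* W/U come straight from the gpte; exec is granted by default
         * and revoked when NX is set. */
        static unsigned gpte_access_demo(uint64_t gpte)
        {
                unsigned access;

                access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) |
                         ACC_EXEC_MASK;
                access &= ~(unsigned)(gpte >> PT64_NX_SHIFT); /* NX kills exec */
                return access;
        }

        int main(void)
        {
                assert(gpte_access_demo(0) == ACC_EXEC_MASK);
                assert(gpte_access_demo(1ULL << PT64_NX_SHIFT) == 0);
                assert(gpte_access_demo(PT_WRITABLE_MASK) ==
                       (ACC_WRITE_MASK | ACC_EXEC_MASK));
                return 0;
        }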
> +
> static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
> struct kvm_mmu *mmu,
> struct guest_walker *walker,
> @@ -123,7 +178,8 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
> trace_kvm_mmu_set_accessed_bit(table_gfn, index,
> sizeof(pte));
> pte |= PT_ACCESSED_MASK;
> }
> - if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
> + if (level == walker->level && write_fault &&
> + !(pte & PT_DIRTY_MASK)) {
Why use the raw code instead of the function? Is it because this was the
function's only user, and that is why the function was dropped?
The rest looks good to me.
Reviewed-by: Xiao Guangrong <[email protected]>