In preparation, move gpte_access() and prefetch_invalid_gpte() from mmu.c to
paging_tmpl.h, wrapping them in FNAME() and switching their callers to the
FNAME() names, so that each instantiation of the paging template gets its own
copy of these helpers. No functional change.
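
For context, paging_tmpl.h is included once per guest paging mode and FNAME()
expands to a mode-specific prefix, so every function defined there exists once
per mode. A simplified sketch of that mechanism (the real header defines
several more per-mode macros):

    /* arch/x86/kvm/paging_tmpl.h (simplified) */
    #if PTTYPE == 64
    #define FNAME(name) paging64_##name
    #elif PTTYPE == 32
    #define FNAME(name) paging32_##name
    #endif

    /* arch/x86/kvm/mmu.c instantiates the template for each mode */
    #define PTTYPE 64
    #include "paging_tmpl.h"    /* emits paging64_gpte_access(), ... */
    #undef PTTYPE

    #define PTTYPE 32
    #include "paging_tmpl.h"    /* emits paging32_gpte_access(), ... */
    #undef PTTYPE

Placing the two helpers under FNAME() therefore gives each paging mode its own
copy, which a later template instantiation can pick up without further changes
to mmu.c.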

Signed-off-by: Nadav Har'El <n...@il.ibm.com>
Signed-off-by: Jun Nakajima <jun.nakaj...@intel.com>
Signed-off-by: Xinhao Xu <xinhao...@intel.com>
---
 arch/x86/kvm/mmu.c         | 30 ------------------------------
 arch/x86/kvm/paging_tmpl.h | 40 +++++++++++++++++++++++++++++++++++-----
 2 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 956ca35..a431495 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2480,26 +2480,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
        return gfn_to_pfn_memslot_atomic(slot, gfn);
 }
 
-static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
-                                 struct kvm_mmu_page *sp, u64 *spte,
-                                 u64 gpte)
-{
-       if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-               goto no_present;
-
-       if (!is_present_gpte(gpte))
-               goto no_present;
-
-       if (!(gpte & PT_ACCESSED_MASK))
-               goto no_present;
-
-       return false;
-
-no_present:
-       drop_spte(vcpu->kvm, spte);
-       return true;
-}
-
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp,
                                    u64 *start, u64 *end)
@@ -3399,16 +3379,6 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
        return false;
 }
 
-static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
-{
-       unsigned access;
-
-       access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-       access &= ~(gpte >> PT64_NX_SHIFT);
-
-       return access;
-}
-
 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
 {
        unsigned index;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 105dd5b..13ceca6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -103,6 +103,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        return (ret != orig_pte);
 }
 
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *sp, u64 *spte,
+                                 u64 gpte)
+{
+       if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+               goto no_present;
+
+       if (!is_present_gpte(gpte))
+               goto no_present;
+
+       if (!(gpte & PT_ACCESSED_MASK))
+               goto no_present;
+
+       return false;
+
+no_present:
+       drop_spte(vcpu->kvm, spte);
+       return true;
+}
+
+static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
+{
+       unsigned access;
+
+       access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+       access &= ~(gpte >> PT64_NX_SHIFT);
+
+       return access;
+}
+
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
@@ -225,7 +255,7 @@ retry_walk:
                }
 
                accessed_dirty &= pte;
-               pte_access = pt_access & gpte_access(vcpu, pte);
+               pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 
                walker->ptes[walker->level - 1] = pte;
        } while (!is_last_gpte(mmu, walker->level, pte));
@@ -309,13 +339,13 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        gfn_t gfn;
        pfn_t pfn;
 
-       if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
+       if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                return false;
 
        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 
        gfn = gpte_to_gfn(gpte);
-       pte_access = sp->role.access & gpte_access(vcpu, gpte);
+       pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
        protect_clean_gpte(&pte_access, gpte);
        pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                        no_dirty_log && (pte_access & ACC_WRITE_MASK));
@@ -782,14 +812,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                                          sizeof(pt_element_t)))
                        return -EINVAL;
 
-               if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
+               if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }
 
                gfn = gpte_to_gfn(gpte);
                pte_access = sp->role.access;
-               pte_access &= gpte_access(vcpu, gpte);
+               pte_access &= FNAME(gpte_access)(vcpu, gpte);
                protect_clean_gpte(&pte_access, gpte);
 
                if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
-- 
1.8.1.2
