From: Takuya Yoshikawa <[email protected]>

Introduce two new helpers: set_accessed_bit() and is_last_gpte().

These names were suggested by Ingo and Avi.

Cc: Ingo Molnar <[email protected]>
Signed-off-by: Takuya Yoshikawa <[email protected]>
---
 arch/x86/kvm/paging_tmpl.h |   57 ++++++++++++++++++++++++++++++++-----------
 1 files changed, 42 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92fe275..d655a4b6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -113,6 +113,43 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
        return access;
 }
 
+static int FNAME(set_accessed_bit)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                                  gfn_t table_gfn, unsigned index,
+                                  pt_element_t __user *ptep_user,
+                                  pt_element_t *ptep)
+{
+       int ret;
+
+       trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(*ptep));
+       ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+                                 *ptep, *ptep|PT_ACCESSED_MASK);
+       if (unlikely(ret))
+               return ret;
+
+       mark_page_dirty(vcpu->kvm, table_gfn);
+       *ptep |= PT_ACCESSED_MASK;
+
+       return 0;
+}
+
+static bool FNAME(is_last_gpte)(struct guest_walker *walker,
+                               struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                               pt_element_t gpte)
+{
+       if (walker->level == PT_PAGE_TABLE_LEVEL)
+               return true;
+
+       if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
+           (PTTYPE == 64 || is_pse(vcpu)))
+               return true;
+
+       if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
+           (mmu->root_level == PT64_ROOT_LEVEL))
+               return true;
+
+       return false;
+}
+
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -214,31 +251,21 @@ retry_walk:
 
                if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
                        int ret;
-                       trace_kvm_mmu_set_accessed_bit(table_gfn, index,
-                                                      sizeof(pte));
-                       ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
-                                                 pte, pte|PT_ACCESSED_MASK);
-                       if (unlikely(ret < 0)) {
+
+                       ret = FNAME(set_accessed_bit)(vcpu, mmu, table_gfn,
+                                                     index, ptep_user, &pte);
+                       if (ret < 0) {
                                errcode |= PFERR_PRESENT_MASK;
                                goto error;
                        } else if (ret)
                                goto retry_walk;
-
-                       mark_page_dirty(vcpu->kvm, table_gfn);
-                       pte |= PT_ACCESSED_MASK;
                }
 
                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 
                walker->ptes[walker->level - 1] = pte;
 
-               if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
-                   ((walker->level == PT_DIRECTORY_LEVEL) &&
-                               is_large_pte(pte) &&
-                               (PTTYPE == 64 || is_pse(vcpu))) ||
-                   ((walker->level == PT_PDPE_LEVEL) &&
-                               is_large_pte(pte) &&
-                               mmu->root_level == PT64_ROOT_LEVEL)) {
+               if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
                        int lvl = walker->level;
                        gpa_t real_gpa;
                        gfn_t gfn;
-- 
1.7.4.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to