From: Yulei Zhang <[email protected]>

Refine the fast page fault code so that it can be used in either normal
EPT mode or direct-build EPT mode: take the mapping level from the
shadow-walk iterator and derive the gfn to be marked dirty from the
faulting GPA, instead of reading both from the kvm_mmu_page that backs
the sptep.

Signed-off-by: Yulei Zhang <[email protected]>
---
 arch/x86/kvm/mmu/mmu.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)
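
Note for reviewers (not for the commit log): the sketch below only restates
the derivation this patch switches to, assuming the
vcpu->arch.direct_build_tdp flag that the rest of this series presumably
introduces; in that mode the fast path no longer consults sptep_to_sp():

        /* The mapping level now comes from the shadow-walk iterator. */
        int pte_level = (iterator.level < PG_LEVEL_4K) ?
                                PG_LEVEL_4K : iterator.level;

        /* The gfn is recovered from the faulting GPA rather than sp->gfn. */
        gfn_t gfn = cr2_or_gpa >> PAGE_SHIFT;

fast_pf_fix_direct_spte() then passes that gfn to kvm_vcpu_mark_page_dirty().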

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f2124f52b286..fda6c4196854 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3443,12 +3443,13 @@ static bool page_fault_can_be_fast(u32 error_code)
  * someone else modified the SPTE from its original value.
  */
 static bool
-fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, gpa_t gpa,
                        u64 *sptep, u64 old_spte, u64 new_spte)
 {
        gfn_t gfn;
 
-       WARN_ON(!sp->role.direct);
+       WARN_ON(!vcpu->arch.direct_build_tdp &&
+               (!sptep_to_sp(sptep)->role.direct));
 
        /*
         * Theoretically we could also set dirty bit (and flush TLB) here in
@@ -3470,7 +3471,8 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                 * The gfn of direct spte is stable since it is
                 * calculated by sp->gfn.
                 */
-               gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+
+               gfn = gpa >> PAGE_SHIFT;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
        }
 
@@ -3498,10 +3500,10 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            u32 error_code)
 {
        struct kvm_shadow_walk_iterator iterator;
-       struct kvm_mmu_page *sp;
        bool fault_handled = false;
        u64 spte = 0ull;
        uint retry_count = 0;
+       int pte_level = 0;
 
        if (!page_fault_can_be_fast(error_code))
                return false;
@@ -3515,8 +3517,15 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        if (!is_shadow_present_pte(spte))
                                break;
 
-               sp = sptep_to_sp(iterator.sptep);
-               if (!is_last_spte(spte, sp->role.level))
+               if (iterator.level < PG_LEVEL_4K)
+                       pte_level = PG_LEVEL_4K;
+               else
+                       pte_level = iterator.level;
+
+               WARN_ON(!vcpu->arch.direct_build_tdp &&
+                       (pte_level != sptep_to_sp(iterator.sptep)->role.level));
+
+               if (!is_last_spte(spte, pte_level))
                        break;
 
                /*
@@ -3559,7 +3568,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                         *
                         * See the comments in kvm_arch_commit_memory_region().
                         */
-                       if (sp->role.level > PG_LEVEL_4K)
+                       if (pte_level > PG_LEVEL_4K)
                                break;
                }
 
@@ -3573,7 +3582,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                 * since the gfn is not stable for indirect shadow page. See
                 * Documentation/virt/kvm/locking.rst to get more detail.
                 */
-               fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
+               fault_handled = fast_pf_fix_direct_spte(vcpu, cr2_or_gpa,
                                                        iterator.sptep, spte,
                                                        new_spte);
                if (fault_handled)
@@ -4106,6 +4115,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        if (fast_page_fault(vcpu, gpa, error_code))
                return RET_PF_RETRY;
 
+       if (vcpu->arch.direct_build_tdp)
+               return RET_PF_EMULATE;
+
        r = mmu_topup_memory_caches(vcpu, false);
        if (r)
                return r;
-- 
2.17.1
