Now that all x86 page fault paths precisely track refcounted pages, use
kvm_page_fault.refcounted_page to put references to struct page memory
when finishing page faults.  This is a baby step towards eliminating
kvm_pfn_to_refcounted_page().
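
For context, the pfn-based release path being replaced looks roughly
like the sketch below (paraphrased from virt/kvm/kvm_main.c, not the
verbatim helpers; exact details may differ).  Releasing by pfn must
first reverse-translate the pfn to a struct page via
kvm_pfn_to_refcounted_page(), whereas releasing the tracked
refcounted_page operates on the page directly:

	/* Rough sketch, for illustration only. */
	void kvm_release_pfn_clean(kvm_pfn_t pfn)
	{
		struct page *page;

		if (is_error_noslot_pfn(pfn))
			return;

		/* The reverse lookup this patch avoids in the fault path. */
		page = kvm_pfn_to_refcounted_page(pfn);
		if (!page)
			return;

		kvm_release_page_clean(page);
	}

	void kvm_release_page_clean(struct page *page)
	{
		if (!page)
			return;

		/* Mark the page accessed and drop the reference directly. */
		kvm_set_page_accessed(page);
		put_page(page);
	}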

Tested-by: Alex Bennée <alex.ben...@linaro.org>
Signed-off-by: Sean Christopherson <sea...@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a038cde74f0d..f9b7e3a7370f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4373,6 +4373,9 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
        lockdep_assert_once(lockdep_is_held(&vcpu->kvm->mmu_lock) ||
                            r == RET_PF_RETRY);
 
+       if (!fault->refcounted_page)
+               return;
+
        /*
         * If the page that KVM got from the *primary MMU* is writable, and KVM
         * installed or reused a SPTE, mark the page/folio dirty.  Note, this
@@ -4384,9 +4387,9 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
         * folio dirty if KVM could locklessly make the SPTE writable.
         */
        if (!fault->map_writable || r == RET_PF_RETRY)
-               kvm_release_pfn_clean(fault->pfn);
+               kvm_release_page_clean(fault->refcounted_page);
        else
-               kvm_release_pfn_dirty(fault->pfn);
+               kvm_release_page_dirty(fault->refcounted_page);
 }
 
 static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
-- 
2.47.0.rc1.288.g06298d1525-goog

