When adding pages prior to boot, SEV needs to pin the resulting host pfn
so that the pages consumed by sev_launch_update_data() are not moved
after the memory is encrypted, which would corrupt the guest data. To
that end, have direct_page_fault() return the pfn to its callers via a
new @pfn parameter instead of keeping it purely local.
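
For illustration only, a pre-boot SEV path could consume the exposed
pfn along the lines of the sketch below. sev_pin_gfn() is a made-up
helper and the bare get_page() pinning scheme is an assumption, not
part of this patch; note too that direct_page_fault() still drops its
own reference via kvm_release_pfn_clean(), so a real implementation
would need to take its reference before the page can be migrated.

static kvm_pfn_t sev_pin_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	kvm_pfn_t pfn;
	int r;

	/* Fault the gfn into the TDP page tables to get at its pfn. */
	r = direct_page_fault(vcpu, gfn << PAGE_SHIFT, PFERR_WRITE_MASK,
			      false, PG_LEVEL_4K, true, &pfn);
	if (r == RET_PF_RETRY || is_error_noslot_pfn(pfn))
		return KVM_PFN_ERR_FAULT;

	/*
	 * Pin the backing page so that it can't be migrated before it
	 * is encrypted (assumes the pfn is backed by a struct page).
	 */
	get_page(pfn_to_page(pfn));
	return pfn;
}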

Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/kvm/mmu/mmu.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cab3b2f2f49c3..92b133d7b1713 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4156,7 +4156,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-                            bool prefault, int max_level, bool is_tdp)
+                            bool prefault, int max_level, bool is_tdp,
+                            kvm_pfn_t *pfn)
 {
        bool write = error_code & PFERR_WRITE_MASK;
        bool exec = error_code & PFERR_FETCH_MASK;
@@ -4165,7 +4166,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
        gfn_t gfn = gpa >> PAGE_SHIFT;
        unsigned long mmu_seq;
-       kvm_pfn_t pfn;
        int r;
 
        if (page_fault_handle_page_track(vcpu, error_code, gfn))
@@ -4184,10 +4184,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+       if (try_async_pf(vcpu, prefault, gfn, gpa, pfn, write, &map_writable))
                return RET_PF_RETRY;
 
-       if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
+       if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, *pfn, ACC_ALL, &r))
                return r;
 
        r = RET_PF_RETRY;
@@ -4197,23 +4197,25 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
-       r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
+       r = __direct_map(vcpu, gpa, write, map_writable, max_level, *pfn,
                         prefault, is_tdp && lpage_disallowed);
 
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
-       kvm_release_pfn_clean(pfn);
+       kvm_release_pfn_clean(*pfn);
        return r;
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
                                u32 error_code, bool prefault)
 {
+       kvm_pfn_t pfn;
+
        pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
 
        /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
        return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
-                                PG_LEVEL_2M, false);
+                                PG_LEVEL_2M, false, &pfn);
 }
 
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
@@ -4252,6 +4254,7 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                       bool prefault)
 {
+       kvm_pfn_t pfn;
        int max_level;
 
        for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
@@ -4265,7 +4268,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        }
 
        return direct_page_fault(vcpu, gpa, error_code, prefault,
-                                max_level, true);
+                                max_level, true, &pfn);
 }
 
 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
-- 
2.28.0
