From: Zhang Yi Z <yi.z.zh...@linux.intel.com>

We should also set up the SPP page structure when we catch an SPPT
miss; in some cases, such as vCPU hotplug, the SPP page table has to be
updated from within the SPPT miss handler.
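
In sketch form, the SPPT-miss path added below does roughly the
following (names as introduced by this patch series; error handling
omitted):

        gpa_t gpa;
        gfn_t gfn;
        u32 map;

        /* Identify the gfn whose SPP entries are missing. */
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        gfn = gpa >> PAGE_SHIFT;

        /* Fetch the subpage write-protect bitmap stored in the memslot... */
        kvm_mmu_get_spp_access_map(vcpu->kvm, &map, gfn);

        /* ...and rebuild the SPP paging structure for that gfn. */
        kvm_mmu_setup_spp_structure(vcpu, map, gfn);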

Signed-off-by: Zhang Yi Z <yi.z.zh...@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/mmu.c              | 12 ++++++++++++
 arch/x86/kvm/vmx.c              |  8 ++++++++
 3 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ef50d98..bc56c4c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1260,6 +1260,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
 int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
                                u32 access_map, gfn_t gfn);
 
+int kvm_mmu_get_spp_access_map(struct kvm *kvm, u32 *access_map, gfn_t gfn);
+
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c229324..88b8571 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4129,6 +4129,17 @@ static void mmu_spp_spte_set(u64 *sptep, u64 new_spte)
        __set_spte(sptep, new_spte);
 }
 
+int kvm_mmu_get_spp_access_map(struct kvm *kvm, u32 *access_map, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot;
+
+       slot = gfn_to_memslot(kvm, gfn);
+       *access_map = *gfn_to_subpage_wp_info(slot, gfn);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_get_spp_access_map);
+
 int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
                                u32 access_map, gfn_t gfn)
 {
@@ -4174,6 +4185,7 @@ int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
        spin_unlock(&kvm->mmu_lock);
        return -EFAULT;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_setup_spp_structure);
 
 int kvm_mmu_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9116b53..c4cd773 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8005,6 +8005,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 static int handle_spp(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
+       gpa_t gpa;
+       gfn_t gfn;
+       u32 map;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
@@ -8031,6 +8034,11 @@ static int handle_spp(struct kvm_vcpu *vcpu)
                 * SPP table here.
                 */
                pr_debug("SPP: %s: SPPT Miss!!!\n", __func__);
+
+               gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+               gfn = gpa >> PAGE_SHIFT;
+               kvm_mmu_get_spp_access_map(vcpu->kvm, &map, gfn);
+               kvm_mmu_setup_spp_structure(vcpu, map, gfn);
                return 1;
        }
 
-- 
2.7.4
