From: Christoffer Dall <[email protected]>

Unmap/flush shadow stage 2 page tables for the nested VMs as well as the
stage 2 page table for the guest hypervisor.

Note: the MMU notifier callbacks in mmu.c currently handle the nested
MMUs in an extremely blunt way, for example by clearing out an entire
shadow stage-2 table. This will be handled more efficiently using the
reverse mapping feature in a later version of the patch series.
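
For illustration, the pattern these paths follow is to pair the unmap
of the canonical stage 2 with a wholesale teardown of every valid
shadow stage 2, along the lines of the kvm_arch_flush_shadow_memslot()
hunk in this patch (sketch only; kvm->mmu_lock must be held, and the
helpers are added in nested.c below):

        spin_lock(&kvm->mmu_lock);
        /* Unmap the affected range from the canonical stage 2... */
        kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
        /* ...then drop every valid shadow stage 2 in its entirety. */
        kvm_nested_s2_clear(kvm);
        spin_unlock(&kvm->mmu_lock);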

Signed-off-by: Christoffer Dall <[email protected]>
Signed-off-by: Jintack Lim <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
---
 arch/arm64/include/asm/kvm_mmu.h    |  3 +++
 arch/arm64/include/asm/kvm_nested.h |  3 +++
 arch/arm64/kvm/mmu.c                | 29 ++++++++++++++++++---
 arch/arm64/kvm/nested.c             | 39 +++++++++++++++++++++++++++++
 4 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 579980a8b05f..eaec0366526d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
                           void __iomem **haddr);
 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
                             void **haddr);
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+                           phys_addr_t addr, phys_addr_t end);
 void free_hyp_pgds(void);
 
 void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
@@ -166,6 +168,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable);
+void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 3f3d8e10bd99..2987806850f0 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -114,6 +114,9 @@ extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
 extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
                                    struct kvm_s2_trans *trans);
 extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
+extern void kvm_nested_s2_wp(struct kvm *kvm);
+extern void kvm_nested_s2_clear(struct kvm *kvm);
+extern void kvm_nested_s2_flush(struct kvm *kvm);
 int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
 extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
                            u64 control_bit);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6db8fa8bc5a3..fddcbe200573 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -179,13 +179,20 @@ void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
        __unmap_stage2_range(mmu, start, size, true);
 }
 
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+                           phys_addr_t addr, phys_addr_t end)
+{
+       stage2_apply_range_resched(kvm_s2_mmu_to_kvm(mmu), addr, end, kvm_pgtable_stage2_flush);
+}
+
 static void stage2_flush_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
 {
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+       struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
 
-       stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
+       kvm_stage2_flush_range(mmu, addr, end);
 }
 
 /**
@@ -208,6 +215,8 @@ static void stage2_flush_vm(struct kvm *kvm)
        kvm_for_each_memslot(memslot, slots)
                stage2_flush_memslot(kvm, memslot);
 
+       kvm_nested_s2_flush(kvm);
+
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -562,6 +571,8 @@ void stage2_unmap_vm(struct kvm *kvm)
        kvm_for_each_memslot(memslot, slots)
                stage2_unmap_memslot(kvm, memslot);
 
+       kvm_nested_s2_clear(kvm);
+
        spin_unlock(&kvm->mmu_lock);
        mmap_read_unlock(current->mm);
        srcu_read_unlock(&kvm->srcu, idx);
@@ -636,7 +647,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  * @addr:      Start address of range
  * @end:       End address of range
  */
-static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
+void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
@@ -668,7 +679,8 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
        end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
        spin_lock(&kvm->mmu_lock);
-       stage2_wp_range(&kvm->arch.mmu, start, end);
+       kvm_stage2_wp_range(&kvm->arch.mmu, start, end);
+       kvm_nested_s2_wp(kvm);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
 }
@@ -692,7 +704,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
        phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
        phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
 
-       stage2_wp_range(&kvm->arch.mmu, start, end);
+       kvm_stage2_wp_range(&kvm->arch.mmu, start, end);
 }
 
 /*
@@ -707,6 +719,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                gfn_t gfn_offset, unsigned long mask)
 {
        kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+       kvm_nested_s2_wp(kvm);
 }
 
 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
@@ -1247,6 +1260,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
                             (range->end - range->start) << PAGE_SHIFT,
                             range->may_block);
 
+       kvm_nested_s2_clear(kvm);
        return 0;
 }
 
@@ -1275,6 +1289,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
                               PAGE_SIZE, __pfn_to_phys(pfn),
                               KVM_PGTABLE_PROT_R, NULL);
 
+       kvm_nested_s2_clear(kvm);
        return 0;
 }
 
@@ -1293,6 +1308,11 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
                                        range->start << PAGE_SHIFT);
        pte = __pte(kpte);
        return pte_valid(pte) && pte_young(pte);
+
+       /*
+        * TODO: Handle nested_mmu structures here using the reverse mapping in
+        * a later version of the patch series.
+        */
 }
 
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1525,6 +1545,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 
        spin_lock(&kvm->mmu_lock);
        kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+       kvm_nested_s2_clear(kvm);
        spin_unlock(&kvm->mmu_lock);
 }
 
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 2e6a97e43396..9aa4cefc954d 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -505,6 +505,45 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
        return kvm_inject_nested_sync(vcpu, esr_el2);
 }
 
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_wp(struct kvm *kvm)
+{
+       int i;
+
+       for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+               struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+               if (kvm_s2_mmu_valid(mmu))
+                       kvm_stage2_wp_range(mmu, 0, kvm_phys_size(kvm));
+       }
+}
+
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_clear(struct kvm *kvm)
+{
+       int i;
+
+       for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+               struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+               if (kvm_s2_mmu_valid(mmu))
+                       kvm_unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
+       }
+}
+
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_flush(struct kvm *kvm)
+{
+       int i;
+
+       for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+               struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+               if (kvm_s2_mmu_valid(mmu))
+                       kvm_stage2_flush_range(mmu, 0, kvm_phys_size(kvm));
+       }
+}
+
 /*
  * Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
  * the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
-- 
2.29.2
