- After the guest becomes secure, when the HV handles a page fault for
  a page belonging to the SVM, send that page to the UV via UV_PAGE_IN.
- Whenever a page is unmapped on the HV side, inform the UV via
  UV_PAGE_INVAL.
- Ensure that the routines which walk the guest's secondary page tables
  skip the walk for a secure VM: for a secure guest the active secondary
  page tables live in secure memory, and the HV-side secondary page
  tables are freed when the guest becomes secure. (A minimal sketch of
  the shared pattern follows this list.)
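
All three changes hinge on the same guard; below is a minimal sketch
(not part of the patch) with names taken from the hunks that follow,
surrounding logic elided:

	/*
	 * Fault path: the HV no longer owns the SVM's pages, so hand
	 * the faulting page to the UV instead of mapping it here.
	 */
	if (kvmppc_is_guest_secure(kvm))
		return kvmppc_send_page_to_uv(kvm, gpa & PAGE_MASK);

	/* Unmap path: ask the UV to drop its mapping of the page. */
	if (kvmppc_is_guest_secure(kvm)) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return 0;
	}

	/*
	 * Walkers (age/dirty-test/flush): the HV-side tables were
	 * freed at H_SVM_INIT_DONE, there is nothing to walk.
	 */
	if (kvmppc_is_guest_secure(kvm))
		return 0;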

Signed-off-by: Bharata B Rao <bhar...@linux.ibm.com>
---
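kvmppc_send_page_to_uv() below is essentially an address translation
wrapped around the UV_PAGE_IN ucall; a compressed sketch, with names
from the patch (error handling and pfn release elided):

	unsigned long gfn = gpa >> PAGE_SHIFT;    /* guest frame number */
	unsigned long pfn = gfn_to_pfn(kvm, gfn); /* backing host frame */

	/* args: lpid, HV real address, guest PA, flags, page shift */
	uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gpa, 0, PAGE_SHIFT);
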
 arch/powerpc/include/asm/kvm_host.h       | 12 ++++++++++++
 arch/powerpc/include/asm/ultravisor-api.h |  1 +
 arch/powerpc/include/asm/ultravisor.h     |  5 +++++
 arch/powerpc/kvm/book3s_64_mmu_radix.c    | 22 ++++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_uvmem.c        | 20 ++++++++++++++++++++
 5 files changed, 60 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index cab3099db8d4..17780c82c1b4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -876,6 +876,8 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 #ifdef CONFIG_PPC_UV
 int kvmppc_uvmem_init(void);
 void kvmppc_uvmem_free(void);
+bool kvmppc_is_guest_secure(struct kvm *kvm);
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -883,6 +885,16 @@ static inline int kvmppc_uvmem_init(void)
 }
 
 static inline void kvmppc_uvmem_free(void) {}
+
+static inline bool kvmppc_is_guest_secure(struct kvm *kvm)
+{
+       return false;
+}
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa)
+{
+       return -EFAULT;
+}
 #endif /* CONFIG_PPC_UV */
 
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index 46b1ee381695..cf200d4ce703 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -29,5 +29,6 @@
 #define UV_UNREGISTER_MEM_SLOT         0xF124
 #define UV_PAGE_IN                     0xF128
 #define UV_PAGE_OUT                    0xF12C
+#define UV_PAGE_INVAL                  0xF138
 
 #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
index 719c0c3930b9..b333241bbe4c 100644
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -57,4 +57,9 @@ static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
        return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
 }
 
+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+       return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
+}
+
 #endif /* _ASM_POWERPC_ULTRAVISOR_H */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 2d415c36a61d..93ad34e63045 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -19,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_host.h>
 
 /*
  * Supported radix tree geometry.
@@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        if (!(dsisr & DSISR_PRTABLE_FAULT))
                gpa |= ea & 0xfff;
 
+       if (kvmppc_is_guest_secure(kvm))
+               return kvmppc_send_page_to_uv(kvm, gpa & PAGE_MASK);
+
        /* Get the corresponding memslot */
        memslot = gfn_to_memslot(kvm, gfn);
 
@@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
 
+       if (kvmppc_is_guest_secure(kvm)) {
+               uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+               return 0;
+       }
+
        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep))
                kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
@@ -989,6 +999,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
        int ref = 0;
        unsigned long old, *rmapp;
 
+       if (kvmppc_is_guest_secure(kvm))
+               return ref;
+
        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
@@ -1013,6 +1026,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
        unsigned int shift;
        int ref = 0;
 
+       if (kvmppc_is_guest_secure(kvm))
+               return ref;
+
        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
@@ -1030,6 +1046,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
        int ret = 0;
        unsigned long old, *rmapp;
 
+       if (kvmppc_is_guest_secure(kvm))
+               return ret;
+
        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
@@ -1082,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
        unsigned long gpa;
        unsigned int shift;
 
+       if (kvmppc_is_guest_secure(kvm))
+               return;
+
        gpa = memslot->base_gfn << PAGE_SHIFT;
        spin_lock(&kvm->mmu_lock);
        for (n = memslot->npages; n; --n) {
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 68a00df1ed79..6e1da68bbdd5 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -49,6 +49,11 @@ struct kvmppc_uvmem_page_pvt {
        bool skip_page_out;
 };
 
+bool kvmppc_is_guest_secure(struct kvm *kvm)
+{
+       return !!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE);
+}
+
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
@@ -443,6 +448,21 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
        return ret;
 }
 
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa)
+{
+       unsigned long pfn;
+       int ret;
+
+       pfn = gfn_to_pfn(kvm, gpa >> PAGE_SHIFT);
+       if (is_error_noslot_pfn(pfn))
+               return -EFAULT;
+
+       ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gpa, 0, PAGE_SHIFT);
+       kvm_release_pfn_clean(pfn);
+
+       return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
+}
+
 static u64 kvmppc_get_secmem_size(void)
 {
        struct device_node *np;
-- 
2.21.0