Re: [PATCH v3 3/4] KVM: PPC: Book3S HV: migrate remaining normal-GFNs to secure-GFNs in H_SVM_INIT_DONE

2020-06-29 Thread Laurent Dufour

Le 28/06/2020 à 18:20, Bharata B Rao a écrit :

On Fri, Jun 19, 2020 at 03:43:41PM -0700, Ram Pai wrote:

H_SVM_INIT_DONE incorrectly assumes that the Ultravisor has explicitly


As noted in the last iteration, can you reword the above please?
I don't see it as an incorrect assumption, but see it as extension of
scope now :-)


called H_SVM_PAGE_IN for all secure pages. These GFNs continue to be
normal GFNs associated with normal PFNs; when in fact, these GFNs should
have been secure GFNs, associated with device PFNs.

Move all the PFNs associated with the SVM's GFNs, to secure-PFNs, in
H_SVM_INIT_DONE. Skip the GFNs that are already Paged-in or Shared
through H_SVM_PAGE_IN, or Paged-in followed by a Paged-out through
UV_PAGE_OUT.

Cc: Paul Mackerras 
Cc: Benjamin Herrenschmidt 
Cc: Michael Ellerman 
Cc: Bharata B Rao 
Cc: Aneesh Kumar K.V 
Cc: Sukadev Bhattiprolu 
Cc: Laurent Dufour 
Cc: Thiago Jung Bauermann 
Cc: David Gibson 
Cc: Claudio Carvalho 
Cc: kvm-...@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Ram Pai 
---
  Documentation/powerpc/ultravisor.rst|   2 +
  arch/powerpc/include/asm/kvm_book3s_uvmem.h |   2 +
  arch/powerpc/kvm/book3s_hv_uvmem.c  | 154 +++-
  3 files changed, 132 insertions(+), 26 deletions(-)

diff --git a/Documentation/powerpc/ultravisor.rst 
b/Documentation/powerpc/ultravisor.rst
index 363736d..3bc8957 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -933,6 +933,8 @@ Return values
* H_UNSUPPORTED if called from the wrong context (e.g.
from an SVM or before an H_SVM_INIT_START
hypercall).
+   * H_STATE   if the hypervisor could not successfully
+transition the VM to Secure VM.
  
  Description

  ~~~
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h 
b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 5a9834e..b9cd7eb 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -22,6 +22,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
  unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
  void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
 struct kvm *kvm, bool skip_page_out);
+int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
+   const struct kvm_memory_slot *memslot);
  #else
  static inline int kvmppc_uvmem_init(void)
  {
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c 
b/arch/powerpc/kvm/book3s_hv_uvmem.c
index c8c0290..449e8a7 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -93,6 +93,7 @@
  #include 
  #include 
  #include 
+#include 
  
  static struct dev_pagemap kvmppc_uvmem_pgmap;

  static unsigned long *kvmppc_uvmem_bitmap;
@@ -339,6 +340,21 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, 
struct kvm *kvm,
return false;
  }
  
+/* return true, if the GFN is a shared-GFN, or a secure-GFN */

+bool kvmppc_gfn_has_transitioned(unsigned long gfn, struct kvm *kvm)
+{
+   struct kvmppc_uvmem_slot *p;
+
+   list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
+   if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
+   unsigned long index = gfn - p->base_pfn;
+
+   return (p->pfns[index] & KVMPPC_GFN_FLAG_MASK);
+   }
+   }
+   return false;
+}
+
  unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
  {
struct kvm_memslots *slots;
@@ -379,12 +395,31 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
  
  unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)

  {
+   struct kvm_memslots *slots;
+   struct kvm_memory_slot *memslot;
+   int srcu_idx;
+   long ret = H_SUCCESS;
+
if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
return H_UNSUPPORTED;
  
+	/* migrate any unmoved normal pfn to device pfns */

+   srcu_idx = srcu_read_lock(&kvm->srcu);
+   slots = kvm_memslots(kvm);
+   kvm_for_each_memslot(memslot, slots) {
+   ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
+   if (ret) {
+   ret = H_STATE;
+   goto out;
+   }
+   }
+
kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
pr_info("LPID %d went secure\n", kvm->arch.lpid);
-   return H_SUCCESS;
+
+out:
+   srcu_read_unlock(&kvm->srcu, srcu_idx);
+   return ret;
  }
  
  /*

@@ -505,12 +540,14 @@ static struct page *kvmppc_uvmem_get_page(unsigned long 
gpa, struct kvm *kvm)
  }
  
  /*

- * Alloc a PFN from private device memory pool and copy page from normal
- * memory to secure memory using UV_PAGE_IN uvcall.
+ * Alloc a PFN from private device memory pool. If @pagein is true,
+ * copy page from normal memory to secure memory using 

Re: [PATCH v3 3/4] KVM: PPC: Book3S HV: migrate remaining normal-GFNs to secure-GFNs in H_SVM_INIT_DONE

2020-06-28 Thread Bharata B Rao
On Fri, Jun 19, 2020 at 03:43:41PM -0700, Ram Pai wrote:
> H_SVM_INIT_DONE incorrectly assumes that the Ultravisor has explicitly

As noted in the last iteration, can you reword the above please?
I don't see it as an incorrect assumption, but see it as extension of
scope now :-)

> called H_SVM_PAGE_IN for all secure pages. These GFNs continue to be
> normal GFNs associated with normal PFNs; when in fact, these GFNs should
> have been secure GFNs, associated with device PFNs.
> 
> Move all the PFNs associated with the SVM's GFNs, to secure-PFNs, in
> H_SVM_INIT_DONE. Skip the GFNs that are already Paged-in or Shared
> through H_SVM_PAGE_IN, or Paged-in followed by a Paged-out through
> UV_PAGE_OUT.
> 
> Cc: Paul Mackerras 
> Cc: Benjamin Herrenschmidt 
> Cc: Michael Ellerman 
> Cc: Bharata B Rao 
> Cc: Aneesh Kumar K.V 
> Cc: Sukadev Bhattiprolu 
> Cc: Laurent Dufour 
> Cc: Thiago Jung Bauermann 
> Cc: David Gibson 
> Cc: Claudio Carvalho 
> Cc: kvm-...@vger.kernel.org
> Cc: linuxppc-dev@lists.ozlabs.org
> Signed-off-by: Ram Pai 
> ---
>  Documentation/powerpc/ultravisor.rst|   2 +
>  arch/powerpc/include/asm/kvm_book3s_uvmem.h |   2 +
>  arch/powerpc/kvm/book3s_hv_uvmem.c  | 154 
> +++-
>  3 files changed, 132 insertions(+), 26 deletions(-)
> 
> diff --git a/Documentation/powerpc/ultravisor.rst 
> b/Documentation/powerpc/ultravisor.rst
> index 363736d..3bc8957 100644
> --- a/Documentation/powerpc/ultravisor.rst
> +++ b/Documentation/powerpc/ultravisor.rst
> @@ -933,6 +933,8 @@ Return values
>   * H_UNSUPPORTED if called from the wrong context (e.g.
>   from an SVM or before an H_SVM_INIT_START
>   hypercall).
> + * H_STATE   if the hypervisor could not successfully
> +transition the VM to Secure VM.
>  
>  Description
>  ~~~
> diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h 
> b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
> index 5a9834e..b9cd7eb 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
> @@ -22,6 +22,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
>  unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
>  void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
>struct kvm *kvm, bool skip_page_out);
> +int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
> + const struct kvm_memory_slot *memslot);
>  #else
>  static inline int kvmppc_uvmem_init(void)
>  {
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c 
> b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index c8c0290..449e8a7 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -93,6 +93,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  static struct dev_pagemap kvmppc_uvmem_pgmap;
>  static unsigned long *kvmppc_uvmem_bitmap;
> @@ -339,6 +340,21 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, 
> struct kvm *kvm,
>   return false;
>  }
>  
> +/* return true, if the GFN is a shared-GFN, or a secure-GFN */
> +bool kvmppc_gfn_has_transitioned(unsigned long gfn, struct kvm *kvm)
> +{
> + struct kvmppc_uvmem_slot *p;
> +
> + list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
> + if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
> + unsigned long index = gfn - p->base_pfn;
> +
> + return (p->pfns[index] & KVMPPC_GFN_FLAG_MASK);
> + }
> + }
> + return false;
> +}
> +
>  unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
>  {
>   struct kvm_memslots *slots;
> @@ -379,12 +395,31 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
>  
>  unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
>  {
> + struct kvm_memslots *slots;
> + struct kvm_memory_slot *memslot;
> + int srcu_idx;
> + long ret = H_SUCCESS;
> +
>   if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
>   return H_UNSUPPORTED;
>  
> + /* migrate any unmoved normal pfn to device pfns */
> + srcu_idx = srcu_read_lock(&kvm->srcu);
> + slots = kvm_memslots(kvm);
> + kvm_for_each_memslot(memslot, slots) {
> + ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
> + if (ret) {
> + ret = H_STATE;
> + goto out;
> + }
> + }
> +
>   kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
>   pr_info("LPID %d went secure\n", kvm->arch.lpid);
> - return H_SUCCESS;
> +
> +out:
> + srcu_read_unlock(&kvm->srcu, srcu_idx);
> + return ret;
>  }
>  
>  /*
> @@ -505,12 +540,14 @@ static struct page *kvmppc_uvmem_get_page(unsigned long 
> gpa, struct kvm *kvm)
>  }
>  
>  /*
> - * Alloc a PFN from private device memory pool and copy page from normal
> - * memory to secure memory using UV_PAGE_IN uvcall.
> + * Alloc a PFN