Re: [v4 1/5] KVM: PPC: Book3S HV: Disable page merging in H_SVM_INIT_START

2020-07-22 Thread Bharata B Rao
On Fri, Jul 17, 2020 at 01:00:23AM -0700, Ram Pai wrote:
> Page-merging of pages in memory-slots associated with a Secure VM
> is disabled in the H_SVM_PAGE_IN handler.
> 
> This operation should have been done much earlier, the moment the VM
> is initiated for secure transition. Delaying this operation increases
> the probability of those pages acquiring new references, making it
> impossible to migrate those pages.
> 
> Disable page-merging in H_SVM_INIT_START handling.
> 
> Signed-off-by: Ram Pai 

Reviewed-by: Bharata B Rao 

with a few observations below...

> ---
>  Documentation/powerpc/ultravisor.rst |  1 +
>  arch/powerpc/kvm/book3s_hv_uvmem.c   | 98 +++-
>  2 files changed, 76 insertions(+), 23 deletions(-)
> 
> diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst
> index df136c8..a1c8c37 100644
> --- a/Documentation/powerpc/ultravisor.rst
> +++ b/Documentation/powerpc/ultravisor.rst
> @@ -895,6 +895,7 @@ Return values
>  One of the following values:
>  
>   * H_SUCCESS  on success.
> +* H_STATE    if the VM is not in a position to switch to secure.
>  
>  Description
>  ~~~~~~~~~~~
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index e6f76bc..0baa293 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -211,6 +211,65 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
>   return false;
>  }
>  
> +static int kvmppc_memslot_page_merge(struct kvm *kvm,
> + struct kvm_memory_slot *memslot, bool merge)
> +{
> + unsigned long gfn = memslot->base_gfn;
> + unsigned long end, start = gfn_to_hva(kvm, gfn);
> + int ret = 0;
> + struct vm_area_struct *vma;
> + int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
> +
> + if (kvm_is_error_hva(start))
> + return H_STATE;
> +
> + end = start + (memslot->npages << PAGE_SHIFT);
> +
> + mmap_write_lock(kvm->mm);
> + do {
> + vma = find_vma_intersection(kvm->mm, start, end);
> + if (!vma) {
> + ret = H_STATE;
> + break;
> + }
> + ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> +   merge_flag, &vma->vm_flags);
> + if (ret) {
> + ret = H_STATE;
> + break;
> + }
> + start = vma->vm_end + 1;

This should be start = vma->vm_end, I believe.
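
I.e., vm_end is exclusive (it already points one byte past the VMA),
so the conventional way to continue the walk would be something like
the sketch below, the same loop as above with only the continuation
changed:

	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		/* vm_end is exclusive, resume the search right at it */
		start = vma->vm_end;
	} while (end > vma->vm_end);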

> + } while (end > vma->vm_end);
> +
> + mmap_write_unlock(kvm->mm);
> + return ret;
> +}
> +
> +static int __kvmppc_page_merge(struct kvm *kvm, bool merge)
> +{
> + struct kvm_memslots *slots;
> + struct kvm_memory_slot *memslot;
> + int ret = 0;
> +
> + slots = kvm_memslots(kvm);
> + kvm_for_each_memslot(memslot, slots) {
> + ret = kvmppc_memslot_page_merge(kvm, memslot, merge);
> + if (ret)
> + break;
> + }
> + return ret;
> +}

You walk through all the slots here to issue ksm_madvise, but...

> +
> +static inline int kvmppc_disable_page_merge(struct kvm *kvm)
> +{
> + return __kvmppc_page_merge(kvm, false);
> +}
> +
> +static inline int kvmppc_enable_page_merge(struct kvm *kvm)
> +{
> + return __kvmppc_page_merge(kvm, true);
> +}
> +
>  unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
>  {
>   struct kvm_memslots *slots;
> @@ -232,11 +291,18 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
>   return H_AUTHORITY;
>  
>   srcu_idx = srcu_read_lock(&kvm->srcu);
> +
> + /* disable page-merging for all memslot */
> + ret = kvmppc_disable_page_merge(kvm);
> + if (ret)
> + goto out;
> +
> + /* register the memslot */
>   slots = kvm_memslots(kvm);
>   kvm_for_each_memslot(memslot, slots) {

... you are walking through the same set of slots here anyway. I think
it makes sense to issue the merge advice from here itself. That will
help you share code with kvmppc_memslot_create() in 5/5.

The three calls below are common to both code paths; they could be
carved out into a separate function if you prefer:

kvmppc_uvmem_slot_init
kvmppc_memslot_page_merge
uv_register_mem_slot
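
Something along these lines, perhaps (the helper name
kvmppc_uvmem_memslot_create is made up here, and the error handling
and return values are only sketched, so treat this as a rough sketch
rather than a drop-in):

	static int kvmppc_uvmem_memslot_create(struct kvm *kvm,
			struct kvm_memory_slot *memslot)
	{
		int ret;

		ret = kvmppc_uvmem_slot_init(kvm, memslot);
		if (ret)
			return H_PARAMETER;

		/* disable page-merging before registering the slot */
		ret = kvmppc_memslot_page_merge(kvm, memslot, false);
		if (ret) {
			kvmppc_uvmem_slot_free(kvm, memslot);
			return H_STATE;
		}

		ret = uv_register_mem_slot(kvm->arch.lpid,
					   memslot->base_gfn << PAGE_SHIFT,
					   memslot->npages * PAGE_SIZE,
					   0, memslot->id);
		if (ret < 0) {
			/* undo the merge advice and the slot init */
			kvmppc_memslot_page_merge(kvm, memslot, true);
			kvmppc_uvmem_slot_free(kvm, memslot);
			return H_PARAMETER;
		}

		return 0;
	}

Then both kvmppc_h_svm_init_start() and the memslot-create path in
5/5 can simply call it per memslot.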

Regards,
Bharata.


[v4 1/5] KVM: PPC: Book3S HV: Disable page merging in H_SVM_INIT_START

2020-07-17 Thread Ram Pai
Page-merging of pages in memory-slots associated with a Secure VM
is disabled in the H_SVM_PAGE_IN handler.

This operation should have been done much earlier, the moment the VM
is initiated for secure transition. Delaying this operation increases
the probability of those pages acquiring new references, making it
impossible to migrate those pages.

Disable page-merging in H_SVM_INIT_START handling.

Signed-off-by: Ram Pai 
---
 Documentation/powerpc/ultravisor.rst |  1 +
 arch/powerpc/kvm/book3s_hv_uvmem.c   | 98 +++-
 2 files changed, 76 insertions(+), 23 deletions(-)

diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst
index df136c8..a1c8c37 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -895,6 +895,7 @@ Return values
 One of the following values:
 
* H_SUCCESS  on success.
+* H_STATE    if the VM is not in a position to switch to secure.
 
 Description
 ~~~~~~~~~~~
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e6f76bc..0baa293 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -211,6 +211,65 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
return false;
 }
 
+static int kvmppc_memslot_page_merge(struct kvm *kvm,
+   struct kvm_memory_slot *memslot, bool merge)
+{
+   unsigned long gfn = memslot->base_gfn;
+   unsigned long end, start = gfn_to_hva(kvm, gfn);
+   int ret = 0;
+   struct vm_area_struct *vma;
+   int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
+
+   if (kvm_is_error_hva(start))
+   return H_STATE;
+
+   end = start + (memslot->npages << PAGE_SHIFT);
+
+   mmap_write_lock(kvm->mm);
+   do {
+   vma = find_vma_intersection(kvm->mm, start, end);
+   if (!vma) {
+   ret = H_STATE;
+   break;
+   }
+   ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ merge_flag, &vma->vm_flags);
+   if (ret) {
+   ret = H_STATE;
+   break;
+   }
+   start = vma->vm_end + 1;
+   } while (end > vma->vm_end);
+
+   mmap_write_unlock(kvm->mm);
+   return ret;
+}
+
+static int __kvmppc_page_merge(struct kvm *kvm, bool merge)
+{
+   struct kvm_memslots *slots;
+   struct kvm_memory_slot *memslot;
+   int ret = 0;
+
+   slots = kvm_memslots(kvm);
+   kvm_for_each_memslot(memslot, slots) {
+   ret = kvmppc_memslot_page_merge(kvm, memslot, merge);
+   if (ret)
+   break;
+   }
+   return ret;
+}
+
+static inline int kvmppc_disable_page_merge(struct kvm *kvm)
+{
+   return __kvmppc_page_merge(kvm, false);
+}
+
+static inline int kvmppc_enable_page_merge(struct kvm *kvm)
+{
+   return __kvmppc_page_merge(kvm, true);
+}
+
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 {
struct kvm_memslots *slots;
@@ -232,11 +291,18 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
return H_AUTHORITY;
 
srcu_idx = srcu_read_lock(&kvm->srcu);
+
+   /* disable page-merging for all memslot */
+   ret = kvmppc_disable_page_merge(kvm);
+   if (ret)
+   goto out;
+
+   /* register the memslot */
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
if (kvmppc_uvmem_slot_init(kvm, memslot)) {
ret = H_PARAMETER;
-   goto out;
+   break;
}
ret = uv_register_mem_slot(kvm->arch.lpid,
   memslot->base_gfn << PAGE_SHIFT,
@@ -245,9 +311,12 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
if (ret < 0) {
kvmppc_uvmem_slot_free(kvm, memslot);
ret = H_PARAMETER;
-   goto out;
+   break;
}
}
+
+   if (ret)
+   kvmppc_enable_page_merge(kvm);
 out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
@@ -384,7 +453,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  */
 static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
   unsigned long end, unsigned long gpa, struct kvm *kvm,
-  unsigned long page_shift, bool *downgrade)
+  unsigned long page_shift)
 {
unsigned long src_pfn, dst_pfn = 0;
struct migrate_vma mig;
@@ -400,18 +469,6 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
mig.src = &src_pfn;
mig.dst = &dst_pfn;
 
-   /*
-* We come here