Re: [RFC PATCH 3/3] PPC, KVM, CMA: use general CMA reserved area management framework

2014-06-05 Thread Aneesh Kumar K.V
Paolo Bonzini <pbonz...@redhat.com> writes:

> On 03/06/2014 09:02, Michal Nazarewicz wrote:
>> On Tue, Jun 03 2014, Joonsoo Kim wrote:
>>> Now that we have a general CMA reserved area management framework,
>>> use it for future maintainability. There is no functional change.
>>>
>>> Signed-off-by: Joonsoo Kim <iamjoonsoo@lge.com>
>>
>> Acked-by: Michal Nazarewicz <min...@mina86.com>
>
> Acked-by: Paolo Bonzini <pbonz...@redhat.com>
>
> Aneesh, can you test this series?

Sorry for the late reply. I will test this and update here.

-aneesh


Re: [RFC PATCH 3/3] PPC, KVM, CMA: use general CMA reserved area management framework

2014-06-03 Thread Michal Nazarewicz
On Tue, Jun 03 2014, Joonsoo Kim wrote:
> Now that we have a general CMA reserved area management framework,
> use it for future maintainability. There is no functional change.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo@lge.com>

Acked-by: Michal Nazarewicz <min...@mina86.com>

-- 
Best regards,                                          _     _
.o. | Liege of Serenely Enlightened Majesty of       o' \,=./ `o
..o | Computer Science,  Michał “mina86” Nazarewicz     (o o)
ooo +--<m...@google.com>--<xmpp:min...@jabber.org>--ooO--(_)--Ooo--

Re: [RFC PATCH 3/3] PPC, KVM, CMA: use general CMA reserved area management framework

2014-06-03 Thread Paolo Bonzini

On 03/06/2014 09:02, Michal Nazarewicz wrote:
> On Tue, Jun 03 2014, Joonsoo Kim wrote:
>> Now that we have a general CMA reserved area management framework,
>> use it for future maintainability. There is no functional change.
>>
>> Signed-off-by: Joonsoo Kim <iamjoonsoo@lge.com>
>
> Acked-by: Michal Nazarewicz <min...@mina86.com>

Acked-by: Paolo Bonzini <pbonz...@redhat.com>

Aneesh, can you test this series?

Paolo

[RFC PATCH 3/3] PPC, KVM, CMA: use general CMA reserved area management framework

2014-06-02 Thread Joonsoo Kim
Now that we have a general CMA reserved area management framework,
use it for future maintainability. There is no functional change.

Signed-off-by: Joonsoo Kim <iamjoonsoo@lge.com>
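
For context, here is a minimal, hedged sketch of what a client of the new
generic CMA interface looks like, mirroring the three calls this patch
switches to (cma_declare_contiguous(), cma_alloc(), cma_release()). The
my_cma handle, the my_* function names, and the SZ_64M size are
hypothetical placeholders for illustration, not part of this patch:

#include <linux/cma.h>
#include <linux/mm.h>		/* get_order() */
#include <linux/sizes.h>

static struct cma *my_cma;	/* handle filled in by the framework */

/* Early boot, while memblock is still active: carve out the area.
 * Argument order as used in this series: size, base, limit,
 * alignment, order_per_bit, fixed, then the output handle. */
void __init my_cma_reserve(void)
{
	cma_declare_contiguous(SZ_64M, 0, 0, 0, 0, false, &my_cma);
}

/* Runtime: allocate nr_pages physically contiguous pages, aligned
 * to the allocation size. */
struct page *my_cma_get(unsigned long nr_pages)
{
	return cma_alloc(my_cma, nr_pages, get_order(nr_pages << PAGE_SHIFT));
}

/* Hand the pages back to the reserved area. */
void my_cma_put(struct page *page, unsigned long nr_pages)
{
	cma_release(my_cma, page, nr_pages);
}

Because all bookkeeping now lives behind the struct cma handle, the
PPC-private bitmap and mutex in book3s_hv_cma.c (deleted below) are no
longer needed.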

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 8cd0dae..43c3f81 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -15,12 +15,14 @@
 #include <linux/init.h>
 #include <linux/memblock.h>
 #include <linux/sizes.h>
+#include <linux/cma.h>
 
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
-#include "book3s_hv_cma.h"
+#define KVM_CMA_CHUNK_ORDER	18
+
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
  * should be power of 2.
@@ -42,6 +44,8 @@ static unsigned long kvm_cma_resv_ratio = 5;
 unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
 EXPORT_SYMBOL_GPL(kvm_rma_pages);
 
+static struct cma *kvm_cma;
+
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
@@ -96,7 +100,7 @@ struct kvm_rma_info *kvm_alloc_rma()
ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
if (!ri)
return NULL;
-   page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
+   page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
if (!page)
goto err_out;
atomic_set(&ri->use_count, 1);
@@ -111,7 +115,7 @@ EXPORT_SYMBOL_GPL(kvm_alloc_rma);
 void kvm_release_rma(struct kvm_rma_info *ri)
 {
if (atomic_dec_and_test(&ri->use_count)) {
-   kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
+   cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
kfree(ri);
}
 }
@@ -133,13 +137,13 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
/* Old CPUs require HPT aligned on a multiple of its size */
if (!cpu_has_feature(CPU_FTR_ARCH_206))
align_pages = nr_pages;
-   return kvm_alloc_cma(nr_pages, align_pages);
+   return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
 void kvm_release_hpt(struct page *page, unsigned long nr_pages)
 {
-   kvm_release_cma(page, nr_pages);
+   cma_release(kvm_cma, page, nr_pages);
 }
 EXPORT_SYMBOL_GPL(kvm_release_hpt);
 
@@ -178,6 +182,7 @@ void __init kvm_cma_reserve(void)
align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
 
align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
-   kvm_cma_declare_contiguous(selected_size, align_size);
+   cma_declare_contiguous(selected_size, 0, 0, align_size,
+   KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
}
 }
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
deleted file mode 100644
index d9d3d85..0000000
--- a/arch/powerpc/kvm/book3s_hv_cma.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Contiguous Memory Allocator for ppc KVM hash pagetable  based on CMA
- * for DMA mapping framework
- *
- * Copyright IBM Corporation, 2013
- * Author Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your optional) any later version of the license.
- *
- */
-#define pr_fmt(fmt) "kvm_cma: " fmt
-
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-#  define DEBUG
-#endif
-#endif
-
-#include <linux/memblock.h>
-#include <linux/mutex.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-
-#include "book3s_hv_cma.h"
-
-struct kvm_cma {
-   unsigned long   base_pfn;
-   unsigned long   count;
-   unsigned long   *bitmap;
-};
-
-static DEFINE_MUTEX(kvm_cma_mutex);
-static struct kvm_cma kvm_cma_area;
-
-/**
- * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
- *   for kvm hash pagetable
- * @size:  Size of the reserved memory.
- * @alignment:  Alignment for the contiguous memory area
- *
- * This function reserves memory for kvm cma area. It should be
- * called by arch code when early allocator (memblock or bootmem)
- * is still activate.
- */
-long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
-{
-   long base_pfn;
-   phys_addr_t addr;
-   struct kvm_cma *cma = &kvm_cma_area;
-
-   pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);
-
-   if (!size)
-   return -EINVAL;
-   /*
-* Sanitise input arguments.
-* We should be pageblock aligned for CMA.
-*/
-   alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
-   size = ALIGN(size, alignment);
-   /*
-* Reserve memory
-* Use __memblock_alloc_base() since
-*