# HG changeset patch
# User Jimi Xenidis <[EMAIL PROTECTED]>
# Node ID 2329c5315dd1edfe4b1d557f0249fffa049fb305
# Parent  a88a9894a4bd51e461a200565b27e4bfd6c6b34d
[POWERPC] memory cleanup

The following changes have been made:
  - htab is allocated by "order" using RMA size, soon it will be
    assigned memory.
  - keep track of RMA in terms of its allocated page struct and order
  - free htab and RMA memory in arch_domain_destroy()
  - get rid of PTEG FULL message, everyone thinks it's a bug

Signed-off-by: Jimi Xenidis <[EMAIL PROTECTED]>
---
 xen/arch/powerpc/domain.c           |   33 +++++++++++++++++++--------------
 xen/arch/powerpc/domain_build.c     |    6 +++---
 xen/arch/powerpc/htab.c             |    5 +++--
 xen/arch/powerpc/mm.c               |    4 ++--
 xen/arch/powerpc/ofd_fixup.c        |    8 ++++----
 xen/arch/powerpc/papr/xlate.c       |    2 ++
 xen/arch/powerpc/powerpc64/ppc970.c |    4 ++--
 xen/include/asm-powerpc/domain.h    |   13 ++++++++-----
 xen/include/asm-powerpc/htab.h      |    5 +----
 9 files changed, 44 insertions(+), 36 deletions(-)

diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/domain.c Sun Aug 13 19:19:37 2006 -0400
@@ -73,10 +73,9 @@ unsigned long hypercall_create_continuat
 
 int arch_domain_create(struct domain *d)
 {
-    struct page_info *rma;
     unsigned long rma_base;
     unsigned long rma_size;
-    unsigned int rma_order;
+    uint htab_order;
 
     if (d->domain_id == IDLE_DOMAIN_ID) {
         d->shared_info = (void *)alloc_xenheap_page();
@@ -85,38 +84,44 @@ int arch_domain_create(struct domain *d)
         return 0;
     }
 
-    rma_order = cpu_rma_order();
-    rma_size = 1UL << rma_order << PAGE_SHIFT;
+    d->arch.rma_order = cpu_rma_order();
+    rma_size = 1UL << d->arch.rma_order << PAGE_SHIFT;
 
     /* allocate the real mode area */
-    d->max_pages = 1UL << rma_order;
-    rma = alloc_domheap_pages(d, rma_order, 0);
-    if (NULL == rma)
+    d->max_pages = 1UL << d->arch.rma_order;
+    d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
+    if (NULL == d->arch.rma_page)
         return 1;
-    rma_base = page_to_maddr(rma);
+    rma_base = page_to_maddr(d->arch.rma_page);
 
     BUG_ON(rma_base & (rma_size-1)); /* check alignment */
-
-    d->arch.rma_base = rma_base;
-    d->arch.rma_size = rma_size;
 
     printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_size);
     memset((void *)rma_base, 0, rma_size);
 
-    htab_alloc(d, LOG_DEFAULT_HTAB_BYTES);
-
     d->shared_info = (shared_info_t *)
         (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
 
     d->arch.large_page_sizes = 1;
     d->arch.large_page_shift[0] = 24; /* 16 M for 970s */
 
+    /* FIXME: we need to know the maximum addressable memory for this
+     * domain to calculate this correctly. It should probably be set
+     * by the management tools */
+    htab_order = d->arch.rma_order - 6; /* (1/64) */
+    if (test_bit(_DOMF_privileged, &d->domain_flags)) {
+        /* bump the htab size of privileged domains */
+        ++htab_order;
+    }
+    htab_alloc(d, htab_order);
+
     return 0;
 }
 
 void arch_domain_destroy(struct domain *d)
 {
-    unimplemented();
+    free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
+    htab_free(d);
 }
 
 void machine_halt(void)
diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c   Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/domain_build.c   Sun Aug 13 19:19:37 2006 -0400
@@ -105,8 +105,8 @@ int construct_dom0(struct domain *d,
     struct domain_setup_info dsi;
     ulong dst;
     u64 *ofh_tree;
-    ulong rma_sz = d->arch.rma_size;
-    ulong rma = d->arch.rma_base;
+    ulong rma_sz = rma_size(d->arch.rma_order);
+    ulong rma = page_to_maddr(d->arch.rma_page);
     start_info_t *si;
     ulong eomem;
     int am64 = 1;
@@ -145,7 +145,7 @@ int construct_dom0(struct domain *d,
 
     /* By default DOM0 is allocated all available memory. */
     d->max_pages = ~0U;
-    d->tot_pages = (d->arch.rma_size >> PAGE_SHIFT);
+    d->tot_pages = 1UL << d->arch.rma_order;
 
     ASSERT( image_len < rma_sz );
 
diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/htab.c
--- a/xen/arch/powerpc/htab.c   Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/htab.c   Sun Aug 13 19:19:37 2006 -0400
@@ -34,13 +34,14 @@ static ulong htab_calc_sdr1(ulong htab_a
     return (htab_addr | (sdr1_htabsize & SDR1_HTABSIZE_MASK));
 }
 
-void htab_alloc(struct domain *d, int log_htab_bytes)
+void htab_alloc(struct domain *d, uint order)
 {
     ulong htab_raddr;
+    ulong log_htab_bytes = order + PAGE_SHIFT;
     ulong htab_bytes = 1UL << log_htab_bytes;
 
     /* XXX use alloc_domheap_pages instead? */
-    htab_raddr = (ulong)alloc_xenheap_pages(log_htab_bytes - PAGE_SHIFT);
+    htab_raddr = (ulong)alloc_xenheap_pages(order);
     ASSERT(htab_raddr != 0);
     /* XXX check alignment guarantees */
     ASSERT((htab_raddr & (htab_bytes-1)) == 0);
diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/mm.c     Sun Aug 13 19:19:37 2006 -0400
@@ -109,8 +109,8 @@ extern void copy_page(void *dp, void *sp
 
 ulong pfn2mfn(struct domain *d, long pfn, int *type)
 {
-    ulong rma_base_mfn = d->arch.rma_base >> PAGE_SHIFT;
-    ulong rma_size_mfn = d->arch.rma_size >> PAGE_SHIFT;
+    ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
+    ulong rma_size_mfn = 1UL << d->arch.rma_order;
     ulong mfn;
     int t;
 
diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/ofd_fixup.c
--- a/xen/arch/powerpc/ofd_fixup.c      Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/ofd_fixup.c      Sun Aug 13 19:19:37 2006 -0400
@@ -359,8 +359,8 @@ static ofdn_t ofd_memory_props(void *m, 
     ofdn_t n = -1;
     ulong start = 0;
     static char name[] = "memory";
-    ulong mem_size = d->arch.rma_size;
-    ulong chunk_size = d->arch.rma_size;
+    ulong mem_size = rma_size(d->arch.rma_order);
+    ulong chunk_size = rma_size(d->arch.rma_order);
 
     /* Remove all old memory props */
     do {
@@ -424,12 +424,12 @@ static ofdn_t ofd_xen_props(void *m, str
         ASSERT(xl < sizeof (xen));
         ofd_prop_add(m, n, "version", xen, xl + 1);
 
-        val[0] = (ulong)si - d->arch.rma_base;
+        val[0] = (ulong)si - page_to_maddr(d->arch.rma_page);
         val[1] = PAGE_SIZE;
         ofd_prop_add(m, n, "start-info", val, sizeof (val));
 
         val[1] =  RMA_LAST_DOM0 * PAGE_SIZE;
-        val[0] =  d->arch.rma_size - val[1];
+        val[0] =  rma_size(d->arch.rma_order) - val[1];
         ofd_prop_add(m, n, "reserved", val, sizeof (val));
 
         n = ofd_node_add(m, n, console, sizeof (console));
diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/papr/xlate.c     Sun Aug 13 19:19:37 2006 -0400
@@ -258,8 +258,10 @@ static void h_enter(struct cpu_user_regs
         }
     }
 
+#ifdef DEBUG
     /* If the PTEG is full then no additional values are returned. */
     printk("%s: PTEG FULL\n", __func__);
+#endif
 
     regs->gprs[3] = H_PTEG_Full;
 }
diff -r a88a9894a4bd -r 2329c5315dd1 xen/arch/powerpc/powerpc64/ppc970.c
--- a/xen/arch/powerpc/powerpc64/ppc970.c       Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/arch/powerpc/powerpc64/ppc970.c       Sun Aug 13 19:19:37 2006 -0400
@@ -114,8 +114,8 @@ void cpu_init_vcpu(struct vcpu *v)
 {
     struct domain *d = v->domain;
     union hid4 hid4;
-    ulong rma_base = d->arch.rma_base;
-    ulong rma_size = d->arch.rma_size;
+    ulong rma_base = page_to_maddr(d->arch.rma_page);
+    ulong rma_size = rma_size(d->arch.rma_order);
 
     hid4.word = mfhid4();
 
diff -r a88a9894a4bd -r 2329c5315dd1 xen/include/asm-powerpc/domain.h
--- a/xen/include/asm-powerpc/domain.h  Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/include/asm-powerpc/domain.h  Sun Aug 13 19:19:37 2006 -0400
@@ -32,10 +32,11 @@
 
 struct arch_domain {
     struct domain_htab htab;
-    /* The RMO area is fixed to the domain and is accessible while the
+
+    /* The Real Mode area is fixed to the domain and is accessible while the
      * processor is in real mode */
-    ulong rma_base;
-    ulong rma_size;
+    struct page_info *rma_page;
+    uint rma_order;
 
     /* This is regular memory, only available thru translataion */
     ulong logical_base_pfn;
@@ -106,9 +107,11 @@ extern void load_float(struct vcpu *);
 #define RMA_CONSOLE 3
 #define RMA_LAST_DOMU 3
 
-static inline ulong rma_addr(struct arch_domain *d, int type)
+#define rma_size(rma_order) (1UL << (rma_order) << PAGE_SHIFT)
+
+static inline ulong rma_addr(struct arch_domain *ad, int type)
 {
-    return d->rma_size - (type * PAGE_SIZE);
+    return rma_size(ad->rma_order) - (type * PAGE_SIZE);
 }
 
 #endif
diff -r a88a9894a4bd -r 2329c5315dd1 xen/include/asm-powerpc/htab.h
--- a/xen/include/asm-powerpc/htab.h    Sun Aug 13 14:43:50 2006 -0400
+++ b/xen/include/asm-powerpc/htab.h    Sun Aug 13 19:19:37 2006 -0400
@@ -25,9 +25,6 @@
 #include <xen/types.h>
 
 /***** general PowerPC architecture limits ******/
-
-#define LOG_DEFAULT_HTAB_BYTES  20
-#define DEFAULT_HTAB_BYTES      (1UL << LOG_HTAB_BYTES)
 
 /* 256KB, from PowerPC Architecture specification */
 #define HTAB_MIN_LOG_SIZE 18
@@ -137,6 +134,6 @@ struct domain_htab {
 };
 
 struct domain;
-extern void htab_alloc(struct domain *d, int log_htab_bytes);
+extern void htab_alloc(struct domain *d, uint order);
 extern void htab_free(struct domain *d);
 #endif

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@lists.xensource.com
http://lists.xensource.com/xen-ppc-devel

Reply via email to