Please double-check the usedbit issue below; otherwise I'll ACK this.
-JX

On Jan 11, 2007, at 4:51 PM, Hollis Blanchard wrote:

# HG changeset patch
# User Hollis Blanchard <[EMAIL PROTECTED]>
# Date 1168550320 21600
# Node ID d98b2fbc100cfec5678a787ba7bfd0b065254793
# Parent  dbc74db14a4b39d359365fcf8257216d968fa269
[POWERPC][XEN] Mark heap memory based on boot_of.c's allocator.
- Explain why we have another allocator (that wasn't so hard now was it?).
- Create and export boot_of_mem_avail() to allow later code to iterate over the
  allocator bitmap.
- Use boot_of_mem_avail() to place memory in the heap, instead of using globals
  and making assumptions about the ordering of reserved areas.

Signed-off-by: Hollis Blanchard <[EMAIL PROTECTED]>

diff -r dbc74db14a4b -r d98b2fbc100c xen/arch/powerpc/boot_of.c
--- a/xen/arch/powerpc/boot_of.c        Tue Dec 12 14:35:07 2006 -0600
+++ b/xen/arch/powerpc/boot_of.c        Thu Jan 11 15:18:40 2007 -0600
@@ -43,6 +43,14 @@ static int of_out;
 static int of_out;
 static ulong eomem;

+/* Track memory during early boot with a limited per-page bitmap. We need an
+ * allocator to tell us where we can place RTAS, our copy of the device tree.
+ * We could examine the "available" properties in memory nodes, but we
+ * apparently can't depend on firmware to update those when we call "claim". So
+ * we need to track it ourselves.
+ * We can't dynamically allocate the bitmap, because we would need something
+ * to tell us where it's safe to allocate...
+ */
 #define MEM_AVAILABLE_PAGES ((32 << 20) >> PAGE_SHIFT)
 static DECLARE_BITMAP(mem_available_pages, MEM_AVAILABLE_PAGES);
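
For scale: assuming 4 KiB pages (PAGE_SHIFT == 12), MEM_AVAILABLE_PAGES works out to (32 << 20) >> 12 = 8192 bits, so this statically sized bitmap costs only 1 KiB while tracking the first 32 MiB of memory.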

@@ -530,6 +538,33 @@ static ulong boot_of_alloc(ulong size)

         pos = pos + i;
     }
+}
+
+int boot_of_mem_avail(int pos, ulong *startpage, ulong *endpage)
If you'd like to hide the bitmap, then perhaps the first argument should be a start address, and the function should return the address of the next "used" page?
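
Something like this, perhaps? A rough sketch only, not part of the patch, reusing the same mem_available_pages bitmap and out-parameters but taking and returning addresses:

    ulong boot_of_mem_avail(ulong pos, ulong *startpage, ulong *endpage)
    {
        ulong freebit;
        ulong usedbit;

        /* The cursor is an address, so callers never see bit indices. */
        freebit = find_next_zero_bit(mem_available_pages, MEM_AVAILABLE_PAGES,
                                     pos >> PAGE_SHIFT);
        *startpage = freebit << PAGE_SHIFT;

        usedbit = find_next_bit(mem_available_pages, MEM_AVAILABLE_PAGES, freebit);
        if (usedbit >= MEM_AVAILABLE_PAGES) {
            /* Everything from *startpage up is free; no further used page. */
            *endpage = ~0UL;
            return ~0UL;
        }
        *endpage = usedbit << PAGE_SHIFT;

        /* The address of the next "used" page doubles as the next cursor. */
        return *endpage;
    }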

+{
+    ulong freebit;
+    ulong usedbit;
+
+    /* find first free page. */
+    freebit = find_next_zero_bit(mem_available_pages, MEM_AVAILABLE_PAGES, pos);
+    if (freebit >= MEM_AVAILABLE_PAGES) {
+        /* We know everything after MEM_AVAILABLE_PAGES is still free. */
+        *startpage = MEM_AVAILABLE_PAGES << PAGE_SHIFT;
+        *endpage = ~0UL;
+        return -1;
+    }
+    *startpage = freebit << PAGE_SHIFT;
+
+    /* now find first used page after that. */
+    usedbit = find_next_bit(mem_available_pages, MEM_AVAILABLE_PAGES, freebit);
+    if (usedbit >= MEM_AVAILABLE_PAGES) {
+        /* We know everything after MEM_AVAILABLE_PAGES is still free. */
+        *endpage = ~0UL;
+        return -1;
+    }
+    *endpage = usedbit << PAGE_SHIFT;

I'm not 100% sure, but the code below looks like it requires that end represent a free page, so "usedbit - 1"?
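
For a concrete case: if pages 4-9 are free and page 10 is the first used page, find_next_bit() returns 10 and *endpage becomes 10 << PAGE_SHIFT, i.e. the first address past the free range. Whether that or (usedbit - 1) << PAGE_SHIFT is the right value depends on whether callers treat end as an exclusive limit (as init_xenheap_pages(start, end) in memory.c appears to) or as the last free page.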

+
+    return usedbit;
 }

 static ulong boot_of_mem_init(void)
diff -r dbc74db14a4b -r d98b2fbc100c xen/arch/powerpc/memory.c
--- a/xen/arch/powerpc/memory.c Tue Dec 12 14:35:07 2006 -0600
+++ b/xen/arch/powerpc/memory.c Thu Jan 11 15:18:40 2007 -0600
@@ -42,8 +42,6 @@ unsigned long xenheap_phys_end;
 unsigned long xenheap_phys_end;
 static uint nr_pages;
 static ulong xenheap_size;
-static ulong save_start;
-static ulong save_end;

 struct membuf {
     ulong start;
@@ -51,30 +49,6 @@ struct membuf {
 };

 typedef void (*walk_mem_fn)(struct membuf *, uint);
-
-static ulong free_xenheap(ulong start, ulong end)
-{
-    start = ALIGN_UP(start, PAGE_SIZE);
-    end = ALIGN_DOWN(end, PAGE_SIZE);
-
-    DBG("%s: 0x%lx - 0x%lx\n", __func__, start, end);
-
-    /* need to do this better */
-    if (save_start <= end && save_start >= start) {
-        DBG("%s:     Go around the saved area: 0x%lx - 0x%lx\n",
-               __func__, save_start, save_end);
-        init_xenheap_pages(start, ALIGN_DOWN(save_start, PAGE_SIZE));
-        xenheap_size += ALIGN_DOWN(save_start, PAGE_SIZE) - start;
-
-        init_xenheap_pages(ALIGN_UP(save_end, PAGE_SIZE), end);
-        xenheap_size += end - ALIGN_UP(save_end, PAGE_SIZE);
-    } else {
-        init_xenheap_pages(start, end);
-        xenheap_size += end - start;
-    }
-
-    return ALIGN_UP(end, PAGE_SIZE);
-}

 static void set_max_page(struct membuf *mb, uint entries)
 {
@@ -113,6 +87,7 @@ static void heap_init(struct membuf *mb,
             start_blk = xenheap_phys_end;
         }

+        DBG("boot free: %016lx - %016lx\n", start_blk, end_blk);
         init_boot_pages(start_blk, end_blk);
         total_pages += (end_blk - start_blk) >> PAGE_SHIFT;
     }
@@ -141,72 +116,31 @@ static void ofd_walk_mem(void *m, walk_m
     }
 }

-static void setup_xenheap(module_t *mod, int mcount)
-{
-    int i;
-    ulong freemem;
-
-    freemem = ALIGN_UP((ulong)_end, PAGE_SIZE);
-
-    for (i = 0; i < mcount; i++) {
-        u32 s;
-
-        if (mod[i].mod_end == mod[i].mod_start)
-            continue;
-
-        s = ALIGN_DOWN(mod[i].mod_start, PAGE_SIZE);
-
-        if (mod[i].mod_start > (ulong)_start &&
-            mod[i].mod_start < (ulong)_end) {
-            /* mod was linked in */
-            continue;
-        }
-
-        if (s < freemem)
-            panic("module addresses must assend\n");
-
-        free_xenheap(freemem, s);
-        freemem = ALIGN_UP(mod[i].mod_end, PAGE_SIZE);
-
-    }
-
-    /* the rest of the xenheap, starting at the end of modules */
-    free_xenheap(freemem, xenheap_phys_end);
-}
-
 void memory_init(module_t *mod, int mcount)
 {
     ulong eomem;
     ulong heap_start;
+    ulong bitmap_start;
+    ulong bitmap_end;
     ulong xh_pages;
+    ulong start;
+    ulong end;
+    int pos = 0;

     /* lets find out how much memory there is and set max_page */
     max_page = 0;
     printk("Physical RAM map:\n");
     ofd_walk_mem((void *)oftree, set_max_page);
     eomem = max_page << PAGE_SHIFT;
-
-    if (eomem == 0){
+    if (eomem == 0) {
         panic("ofd_walk_mem() failed\n");
     }

-    /* find the portion of memory we need to keep safe */
-    save_start = oftree;
-    save_end = oftree_end;
-    if (rtas_base) {
-        if (save_start > rtas_base)
-            save_start = rtas_base;
-        if (save_end < rtas_end)
-            save_end = rtas_end;
-    }
-
-    /* minimum heap has to reach to the end of all Xen required memory */
-    xh_pages = ALIGN_UP(save_end, PAGE_SIZE) >> PAGE_SHIFT;
-    xh_pages += opt_xenheap_megabytes << (20 - PAGE_SHIFT);
+    xh_pages = opt_xenheap_megabytes << (20 - PAGE_SHIFT);

     /* While we are allocating HTABS from The Xen Heap we need it to
      * be larger */
-    xh_pages  += nr_pages >> 5;
+    xh_pages += nr_pages >> 5;

     xenheap_phys_end = xh_pages << PAGE_SHIFT;
     printk("End of Xen Area: %luMiB (%luKiB)\n",
@@ -214,16 +148,22 @@ void memory_init(module_t *mod, int mcou

printk("End of RAM: %luMiB (%luKiB)\n", eomem >> 20, eomem >> 10);

-    /* Architecturally the first 4 pages are exception hendlers, we
-     * will also be copying down some code there */
+    /* Architecturally the first 4 pages are exception handlers. */
     heap_start = 4 << PAGE_SHIFT;
-    if (oftree < (ulong)_start)
-        heap_start = ALIGN_UP(oftree_end, PAGE_SIZE);
-

All the images below are in the bitmap, so can't you use boot_of_mem_avail() to figure this out?
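
Roughly like this, maybe? A sketch only, assuming RTAS and the device tree copy were claimed through boot_of.c's allocator and so show up as used in the bitmap, and reusing the start/end/pos locals the patch already declares (pos would then need resetting to 0 before the xen heap loop below):

    /* Architecturally the first 4 pages are exception handlers. */
    heap_start = 4 << PAGE_SHIFT;

    /* Let the bitmap say where low memory really becomes free, instead of
     * naming rtas_end and oftree_end explicitly. */
    do {
        pos = boot_of_mem_avail(pos, &start, &end);
        if (end > heap_start) {
            heap_start = max_t(ulong, heap_start, start);
            break;
        }
    } while (pos > 0);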

+    /* Avoid anything else in low memory. */
+    if (rtas_end < (ulong)_start)
+        heap_start = max_t(ulong, heap_start, rtas_end);
+    if (oftree_end < (ulong)_start)
+        heap_start = max_t(ulong, heap_start, oftree_end);
+
+    /* Install the boot allocator bitmap low. */
+    bitmap_start = heap_start;
     heap_start = init_boot_allocator(heap_start);
-    if (heap_start > (ulong)_start) {
+    bitmap_end = heap_start;
+    printk("boot allocator @ %lx - %lx\n", bitmap_start, bitmap_end);
+    if (bitmap_end > (ulong)_start) {
         panic("space below _start (%p) is not enough memory "
-              "for heap (0x%lx)\n", _start, heap_start);
+              "for heap (0x%lx)\n", _start, bitmap_end);
     }

     /* allow everything else to be allocated */
@@ -242,12 +182,37 @@ void memory_init(module_t *mod, int mcou

     numa_initmem_init(0, max_page);

+    /* Domain heap gets all the unclaimed memory. */
     end_boot_allocator();

-    /* Add memory between the beginning of the heap and the beginning
-     * of our text */
-    free_xenheap(heap_start, (ulong)_start);
-    setup_xenheap(mod, mcount);
+    /* Create initial xen heap. */
+    do {
+        pos = boot_of_mem_avail(pos, &start, &end);
+        if (end == ~0UL)
+            end = xenheap_phys_end;
+
+        /* Problem: the bitmap itself is not reserved. */
+        if ((start >= bitmap_start) && (start < bitmap_end)) {
+            /* Start is inside bitmap. */
+            start = bitmap_end;
+        }
+        if ((end > bitmap_start) && (end <= bitmap_end)) {
+            /* End is inside bitmap. */
+            end = bitmap_start;
+        }
+        if ((start < bitmap_start) && (end > bitmap_end)) {
+            /* Range encompasses bitmap. First free low part, then high. */
+            xenheap_size += bitmap_start - start;
+            DBG("xenheap: %016lx - %016lx\n", start, bitmap_start);
+            init_xenheap_pages(start, bitmap_start);
+            start = bitmap_end;
+        }
+
+        xenheap_size += end - start;
+        DBG("xenheap: %016lx - %016lx\n", start, end);
+        init_xenheap_pages(start, end);
+    } while (pos > 0);
+
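
To see the carve-out with made-up numbers: if boot_of_mem_avail() reports the free range 0x6000-0x400000 while the boot allocator bitmap sits at 0x6000-0x8000, the first test bumps start up to bitmap_end, the other two tests don't fire, and 0x8000-0x400000 goes into the xen heap. A free range that fully contained the bitmap would instead be split into the pieces below and above it.
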
     printk("Xen Heap: %luMiB (%luKiB)\n",
            xenheap_size >> 20, xenheap_size >> 10);

diff -r dbc74db14a4b -r d98b2fbc100c xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Tue Dec 12 14:35:07 2006 -0600
+++ b/xen/include/asm-powerpc/mm.h      Thu Jan 11 15:18:40 2007 -0600
@@ -35,6 +35,7 @@
 #define memguard_unguard_range(_p,_l)    ((void)0)

 extern unsigned long xenheap_phys_end;
+extern int boot_of_mem_avail(int pos, ulong *start, ulong *end);

 /*
  * Per-page-frame information.

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@lists.xensource.com
http://lists.xensource.com/xen-ppc-devel

