Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=a2f3aa02576632cdb60bd3de1f4bf55e9ac65604
Commit:     a2f3aa02576632cdb60bd3de1f4bf55e9ac65604
Parent:     47a4d5be7c50b2e9b905abbe2b97dc87051c5a44
Author:     Dave Hansen <[EMAIL PROTECTED]>
AuthorDate: Wed Jan 10 23:15:30 2007 -0800
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Thu Jan 11 18:18:20 2007 -0800

    [PATCH] Fix sparsemem on Cell
    
    Fix an oops experienced on the Cell architecture when init-time functions,
    early_*(), are called at runtime.  It alters the call paths to make sure
    that the callers explicitly say whether the call is being made on behalf of
    a hotplug event or is happening at boot-time.
    
    It has been compile tested on ppc64, ia64, s390, i386 and x86_64.
    
    Acked-by: Arnd Bergmann <[EMAIL PROTECTED]>
    Signed-off-by: Dave Hansen <[EMAIL PROTECTED]>
    Cc: Yasunori Goto <[EMAIL PROTECTED]>
    Acked-by: Andy Whitcroft <[EMAIL PROTECTED]>
    Cc: Christoph Lameter <[EMAIL PROTECTED]>
    Cc: Martin Schwidefsky <[EMAIL PROTECTED]>
    Acked-by: Heiko Carstens <[EMAIL PROTECTED]>
    Cc: Benjamin Herrenschmidt <[EMAIL PROTECTED]>
    Cc: Paul Mackerras <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 arch/ia64/mm/init.c    |    5 +++--
 arch/s390/mm/vmem.c    |    3 ++-
 include/linux/mm.h     |    3 ++-
 include/linux/mmzone.h |    8 ++++++--
 mm/memory_hotplug.c    |    6 ++++--
 mm/page_alloc.c        |   25 +++++++++++++++++--------
 6 files changed, 34 insertions(+), 16 deletions(-)

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1a3d8a2..1373fae 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -543,7 +543,8 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
 
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
-                                args->nid, args->zone, page_to_pfn(map_start));
+                                args->nid, args->zone, page_to_pfn(map_start),
+                                MEMMAP_EARLY);
        return 0;
 }
 
@@ -552,7 +553,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
 {
        if (!vmem_map)
-               memmap_init_zone(size, nid, zone, start_pfn);
+               memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 7f2944d..cd3d93e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,7 +61,8 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
 
                if (map_start < map_end)
                        memmap_init_zone((unsigned long)(map_end - map_start),
-                                        nid, zone, page_to_pfn(map_start));
+                                        nid, zone, page_to_pfn(map_start),
+                                        MEMMAP_EARLY);
        }
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a17b147..7691223 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -978,7 +978,8 @@ extern int early_pfn_to_nid(unsigned long pfn);
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+                               unsigned long, enum memmap_context);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
 extern void show_mem(void);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e339a73..b262f47 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -450,9 +450,13 @@ void build_all_zonelists(void);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
-
+enum memmap_context {
+       MEMMAP_EARLY,
+       MEMMAP_HOTPLUG,
+};
 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
-                                    unsigned long size);
+                                    unsigned long size,
+                                    enum memmap_context context);
 
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0c055a0..8427912 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        zone_type = zone - pgdat->node_zones;
        if (!populated_zone(zone)) {
                int ret = 0;
-               ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+               ret = init_currently_empty_zone(zone, phys_start_pfn,
+                                               nr_pages, MEMMAP_HOTPLUG);
                if (ret < 0)
                        return ret;
        }
-       memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
+       memmap_init_zone(nr_pages, nid, zone_type,
+                        phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49f96b..fc5b544 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn)
+               unsigned long start_pfn, enum memmap_context context)
 {
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-               if (!early_pfn_valid(pfn))
-                       continue;
-               if (!early_pfn_in_nid(pfn, nid))
-                       continue;
+               /*
+                * There can be holes in boot-time mem_map[]s
+                * handed to this function.  They do not
+                * exist on hotplugged memory.
+                */
+               if (context == MEMMAP_EARLY) {
+                       if (!early_pfn_valid(pfn))
+                               continue;
+                       if (!early_pfn_in_nid(pfn, nid))
+                               continue;
+               }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-       memmap_init_zone((size), (nid), (zone), (start_pfn))
+       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
-                                       unsigned long size)
+                                       unsigned long size,
+                                       enum memmap_context context)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+               ret = init_currently_empty_zone(zone, zone_start_pfn,
+                                               size, MEMMAP_EARLY);
                BUG_ON(ret);
                zone_start_pfn += size;
        }
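
For callers outside this diff, the new calling convention works as in the sketch
below: boot-time paths pass MEMMAP_EARLY and keep the early_pfn_valid()/
early_pfn_in_nid() hole checks, while runtime (hotplug) paths pass MEMMAP_HOTPLUG
so no early_*() helper is reached after init. This is a minimal sketch mirroring
the __add_zone() hunk above; example_online_zone() and its argument list are
hypothetical and not part of the patch.

static int example_online_zone(struct zone *zone, int nid,
			       unsigned long start_pfn,
			       unsigned long nr_pages)
{
	/* zone index within the node, as __add_zone() computes it */
	unsigned long zone_type = zone - zone->zone_pgdat->node_zones;
	int ret;

	if (!populated_zone(zone)) {
		/* runtime path: flag the call as a hotplug add */
		ret = init_currently_empty_zone(zone, start_pfn, nr_pages,
						MEMMAP_HOTPLUG);
		if (ret < 0)
			return ret;
	}

	/*
	 * MEMMAP_HOTPLUG makes memmap_init_zone() skip the
	 * early_pfn_valid()/early_pfn_in_nid() checks that are only safe
	 * at boot; boot-time callers pass MEMMAP_EARLY instead.
	 */
	memmap_init_zone(nr_pages, nid, zone_type, start_pfn, MEMMAP_HOTPLUG);
	return 0;
}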