From a78e12a9ff31f2a73b87145ce7ad943a0f712708 Mon Sep 17 00:00:00 2001
From: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Date: Wed, 21 Aug 2013 15:23:08 +0800
Subject: [PATCH] mm/sparse: introduce alloc_usemap_and_memmap fix

Pass a function pointer to alloc_usemap_and_memmap() instead of a true/false
flag, so the helper invokes the per-node allocation callback directly rather
than branching on use_map at each call site.

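With the flag gone, each caller selects the allocator by passing the
per-node allocation function itself; the resulting call sites (as in the
hunks below) are:

	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
						(void **)usemap_map);
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
						(void **)map_map);
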
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
 mm/sparse.c |   54 +++++++++++++++++++++++++-----------------------------
 1 files changed, 25 insertions(+), 29 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
index 55e5752..06adf3c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -339,14 +339,16 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+static void __init sparse_early_usemaps_alloc_node(void **usemap_map,
 				 unsigned long pnum_begin,
 				 unsigned long pnum_end,
 				 unsigned long usemap_count, int nodeid)
 {
 	void *usemap;
 	unsigned long pnum;
+	unsigned long **map;
 	int size = usemap_size();
+	map = (unsigned long **) usemap_map;
 
 	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
 							  size * usemap_count);
@@ -358,9 +360,9 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
-		usemap_map[pnum] = usemap;
+		map[pnum] = usemap;
 		usemap += size;
-		check_usemap_section_nr(nodeid, usemap_map[pnum]);
+		check_usemap_section_nr(nodeid, map[pnum]);
 	}
 }
 
@@ -430,23 +432,16 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+static void __init sparse_early_mem_maps_alloc_node(void **map_map,
 				 unsigned long pnum_begin,
 				 unsigned long pnum_end,
 				 unsigned long map_count, int nodeid)
 {
-	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+	struct page **map = (struct page **)map_map;
+	sparse_mem_maps_populate_node(map, pnum_begin, pnum_end,
 					 map_count, nodeid);
 }
 #else
-
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
-				unsigned long pnum_begin,
-				unsigned long pnum_end,
-				unsigned long map_count, int nodeid)
-{
-}
-
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
@@ -471,9 +466,10 @@ void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 /**
  *  alloc_usemap_and_memmap - memory alloction for pageblock flags and vmemmap
  *  @map: usemap_map for pageblock flags or mmap_map for vmemmap
- *  @use_map: true if memory allocated for pageblock flags, otherwise false
  */
-static void alloc_usemap_and_memmap(unsigned long **map, bool use_map)
+static void alloc_usemap_and_memmap(void (*alloc_func)
+				(void **, unsigned long, unsigned long,
+				unsigned long, int), void **map)
 {
 	unsigned long pnum;
 	unsigned long map_count;
@@ -504,24 +500,16 @@ static void alloc_usemap_and_memmap(unsigned long **map, bool use_map)
 			continue;
 		}
 		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
-		if (use_map)
-			sparse_early_usemaps_alloc_node(map, pnum_begin, pnum,
-						 map_count, nodeid_begin);
-		else
-			sparse_early_mem_maps_alloc_node((struct page **)map,
-				pnum_begin, pnum, map_count, nodeid_begin);
+		alloc_func(map, pnum_begin, pnum,
+					map_count, nodeid_begin);
 		/* new start, update count etc*/
 		nodeid_begin = nodeid;
 		pnum_begin = pnum;
 		map_count = 1;
 	}
 	/* ok, last chunk */
-	if (use_map)
-		sparse_early_usemaps_alloc_node(map, pnum_begin,
-				NR_MEM_SECTIONS, map_count, nodeid_begin);
-	else
-		sparse_early_mem_maps_alloc_node((struct page **)map,
-			pnum_begin, NR_MEM_SECTIONS, map_count, nodeid_begin);
+	alloc_func(map, pnum_begin, NR_MEM_SECTIONS,
+					map_count, nodeid_begin);
 }
 
 /*
@@ -561,14 +553,16 @@ void __init sparse_init(void)
 	usemap_map = alloc_bootmem(size);
 	if (!usemap_map)
 		panic("can not allocate usemap_map\n");
-	alloc_usemap_and_memmap(usemap_map, true);
+	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
+						(void **)usemap_map);
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
 	map_map = alloc_bootmem(size2);
 	if (!map_map)
 		panic("can not allocate map_map\n");
-	alloc_usemap_and_memmap((unsigned long **)map_map, false);
+	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
+						(void **)map_map);
 #endif
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-- 
1.7.7.6
