From: Baoquan He <b...@redhat.com>

When called during boot the memmap_init_zone() function checks if each PFN
is valid and actually belongs to the node being initialized using
early_pfn_valid() and early_pfn_in_nid().

Each such check may cost up to O(log(n)) where n is the number of memory
banks, so for a large amount of memory the overall time spent in early_pfn*()
becomes substantial.

Since the information is anyway present in memblock, we can iterate over
memblock memory regions in memmap_init() and only call memmap_init_zone()
for PFN ranges that are known to be valid and in the appropriate node.

Signed-off-by: Baoquan He <b...@redhat.com>
Signed-off-by: Mike Rapoport <r...@linux.ibm.com>
---
 mm/page_alloc.c | 47 ++++++++++++++++-------------------------------
 1 file changed, 16 insertions(+), 31 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7f6a3081edb8..8d112defaead 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5939,23 +5939,6 @@ overlap_memmap_init(unsigned long zone, unsigned long 
*pfn)
        return false;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/* Skip PFNs that belong to non-present sections */
-static inline __meminit unsigned long next_pfn(unsigned long pfn)
-{
-       const unsigned long section_nr = pfn_to_section_nr(++pfn);
-
-       if (present_section_nr(section_nr))
-               return pfn;
-       return section_nr_to_pfn(next_present_section_nr(section_nr));
-}
-#else
-static inline __meminit unsigned long next_pfn(unsigned long pfn)
-{
-       return pfn++;
-}
-#endif
-
 /*
  * Initially all pages are reserved - free ones are freed
  * up by memblock_free_all() once the early boot process is
@@ -5990,19 +5973,7 @@ void __meminit memmap_init_zone(unsigned long size, int 
nid, unsigned long zone,
 #endif
 
        for (pfn = start_pfn; pfn < end_pfn; ) {
-               /*
-                * There can be holes in boot-time mem_map[]s handed to this
-                * function.  They do not exist on hotplugged memory.
-                */
                if (context == MEMMAP_EARLY) {
-                       if (!early_pfn_valid(pfn)) {
-                               pfn = next_pfn(pfn);
-                               continue;
-                       }
-                       if (!early_pfn_in_nid(pfn, nid)) {
-                               pfn++;
-                               continue;
-                       }
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
                        if (defer_init(nid, pfn, end_pfn))
@@ -6118,9 +6089,23 @@ static void __meminit zone_init_free_lists(struct zone 
*zone)
 }
 
 void __meminit __weak memmap_init(unsigned long size, int nid,
-                                 unsigned long zone, unsigned long start_pfn)
+                                 unsigned long zone,
+                                 unsigned long range_start_pfn)
 {
-       memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
+       unsigned long start_pfn, end_pfn;
+       unsigned long range_end_pfn = range_start_pfn + size;
+       int i;
+
+       for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+               start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+               end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+
+               if (end_pfn > start_pfn) {
+                       size = end_pfn - start_pfn;
+                       memmap_init_zone(size, nid, zone, start_pfn,
+                                        MEMMAP_EARLY, NULL);
+               }
+       }
 }
 
 static int zone_batchsize(struct zone *zone)
-- 
2.26.1


_______________________________________________
linux-snps-arc mailing list
linux-snps-arc@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-snps-arc

Reply via email to