Due to the upcoming divergence of x86_32 and x86_64, there is a requirement
to set the allowed allocation range at the early boot stage.
This patch also includes a minor change to remove a redundant condition
check: as with memblock_find_in_range_node(), memblock_find_in_range()
already protects itself from the case start > end.

Signed-off-by: Pingfan Liu <kernelf...@gmail.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: "Rafael J. Wysocki" <r...@rjwysocki.net>
Cc: Len Brown <l...@kernel.org>
Cc: Yinghai Lu <ying...@kernel.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Chao Fan <fanc.f...@cn.fujitsu.com>
Cc: Baoquan He <b...@redhat.com>
Cc: Juergen Gross <jgr...@suse.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Mike Rapoport <r...@linux.vnet.ibm.com>
Cc: Vlastimil Babka <vba...@suse.cz>
Cc: Michal Hocko <mho...@suse.com>
Cc: x...@kernel.org
Cc: linux-a...@vger.kernel.org
Cc: linux...@kvack.org
---
 arch/x86/mm/init.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ef99f38..385b9cd 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -76,6 +76,14 @@ static unsigned long min_pfn_mapped;
 
 static bool __initdata can_use_brk_pgt = true;
 
+static unsigned long min_pfn_allowed;
+static unsigned long max_pfn_allowed;
+void set_alloc_range(unsigned long low, unsigned long high)
+{
+       min_pfn_allowed = low;
+       max_pfn_allowed = high;
+}
+
 /*
  * Pages returned are already directly mapped.
  *
@@ -100,12 +108,10 @@ __ref void *alloc_low_pages(unsigned int num)
        if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
                unsigned long ret = 0;
 
-               if (min_pfn_mapped < max_pfn_mapped) {
-                       ret = memblock_find_in_range(
-                                       min_pfn_mapped << PAGE_SHIFT,
-                                       max_pfn_mapped << PAGE_SHIFT,
-                                       PAGE_SIZE * num , PAGE_SIZE);
-               }
+               ret = memblock_find_in_range(
+                       min_pfn_allowed << PAGE_SHIFT,
+                       max_pfn_allowed << PAGE_SHIFT,
+                       PAGE_SIZE * num, PAGE_SIZE);
                if (ret)
                        memblock_reserve(ret, PAGE_SIZE * num);
                else if (can_use_brk_pgt)
@@ -588,14 +594,17 @@ static void __init memory_map_top_down(unsigned long 
map_start,
                        start = map_start;
                mapped_ram_size += init_range_memory_mapping(start,
                                                        last_start);
+               set_alloc_range(min_pfn_mapped, max_pfn_mapped);
                last_start = start;
                min_pfn_mapped = last_start >> PAGE_SHIFT;
                if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
        }
 
-       if (real_end < map_end)
+       if (real_end < map_end) {
                init_range_memory_mapping(real_end, map_end);
+               set_alloc_range(min_pfn_mapped, max_pfn_mapped);
+       }
 }
 
 /**
@@ -636,6 +645,7 @@ static void __init memory_map_bottom_up(unsigned long 
map_start,
                }
 
                mapped_ram_size += init_range_memory_mapping(start, next);
+               set_alloc_range(min_pfn_mapped, max_pfn_mapped);
                start = next;
 
                if (mapped_ram_size >= step_size)
-- 
2.7.4

Reply via email to