From: "Mike Rapoport (Microsoft)" <[email protected]>

With deferred initialization of struct page it will be necessary to
initialize the memory map for KHO scratch regions early.

Add a memmap_init_kho_scratch_pages() helper that will allow such
initialization in upcoming patches.

Signed-off-by: Mike Rapoport (Microsoft) <[email protected]>
---
 include/linux/memblock.h |  2 ++
 mm/internal.h            |  2 ++
 mm/memblock.c            | 22 ++++++++++++++++++++++
 mm/mm_init.c             | 11 ++++++++---
 4 files changed, 34 insertions(+), 3 deletions(-)
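For context, a minimal sketch of how an upcoming patch might call the
new helper; the kho_release_scratch() name and its body below are
illustrative assumptions, not part of this series:

	#include <linux/memblock.h>

	/* Hypothetical KHO init path in a later patch. */
	static void __init kho_release_scratch(void)
	{
		/*
		 * With CONFIG_DEFERRED_STRUCT_PAGE_INIT struct pages are
		 * normally initialized lazily, but the scratch regions are
		 * handed out early, so their memory map must be set up now.
		 */
		memmap_init_kho_scratch_pages();
	}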

diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 14e4c6b73e2c..20887e199cdb 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -633,9 +633,11 @@ static inline void memtest_report_meminfo(struct seq_file *m) { }
 #ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
 void memblock_set_kho_scratch_only(void);
 void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
 #else
 static inline void memblock_set_kho_scratch_only(void) { }
 static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
 #endif
 
 #endif /* _LINUX_MEMBLOCK_H */
diff --git a/mm/internal.h b/mm/internal.h
index 109ef30fee11..986ad9c2a8b2 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1053,6 +1053,8 @@ DECLARE_STATIC_KEY_TRUE(deferred_pages);
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void init_deferred_page(unsigned long pfn, int nid);
+
 enum mminit_level {
        MMINIT_WARNING,
        MMINIT_VERIFY,
diff --git a/mm/memblock.c b/mm/memblock.c
index 3d68b1fc2bd2..54bd95745381 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -945,6 +945,28 @@ __init_memblock void memblock_clear_kho_scratch_only(void)
 {
        kho_scratch_only = false;
 }
+
+void __init_memblock memmap_init_kho_scratch_pages(void)
+{
+       phys_addr_t start, end;
+       unsigned long pfn;
+       int nid;
+       u64 i;
+
+       if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
+               return;
+
+       /*
+        * Initialize struct pages for free scratch memory.
+        * The struct pages for reserved scratch memory will be set up in
+        * reserve_bootmem_region()
+        */
+       __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+                            MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
+               for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
+                       init_deferred_page(pfn, nid);
+       }
+}
 #endif
 
 /**
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c4b425125bad..04441c258b05 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -705,7 +705,7 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        return false;
 }
 
-static void __meminit init_deferred_page(unsigned long pfn, int nid)
+static void __meminit __init_deferred_page(unsigned long pfn, int nid)
 {
        pg_data_t *pgdat;
        int zid;
@@ -739,11 +739,16 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        return false;
 }
 
-static inline void init_deferred_page(unsigned long pfn, int nid)
+static inline void __init_deferred_page(unsigned long pfn, int nid)
 {
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void __meminit init_deferred_page(unsigned long pfn, int nid)
+{
+       __init_deferred_page(pfn, nid);
+}
+
 /*
  * Initialised pages do not have PageReserved set. This function is
  * called for each range allocated by the bootmem allocator and
@@ -760,7 +765,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start,
                if (pfn_valid(start_pfn)) {
                        struct page *page = pfn_to_page(start_pfn);
 
-                       init_deferred_page(start_pfn, nid);
+                       __init_deferred_page(start_pfn, nid);
 
                        /*
                         * no need for atomic set_bit because the struct
-- 
2.47.2

