From: "Mike Rapoport (Microsoft)" <[email protected]> Move calculations of zone limits to a dedicated arch_zone_limits_init() function.
Later MM core will use this function as an architecture specific callback during nodes and zones initialization and thus there won't be a need to call free_area_init() from every architecture. Signed-off-by: Mike Rapoport (Microsoft) <[email protected]> --- arch/alpha/mm/init.c | 15 ++++++++++----- include/linux/mm.h | 1 + 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 4c5ab9cd8a0a..cd0cb1abde5f 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -208,12 +208,8 @@ callback_init(void * kernel_end) return kernel_end; } -/* - * paging_init() sets up the memory map. - */ -void __init paging_init(void) +void __init arch_zone_limits_init(unsigned long *max_zone_pfn) { - unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, }; unsigned long dma_pfn; dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; @@ -221,8 +217,17 @@ void __init paging_init(void) max_zone_pfn[ZONE_DMA] = dma_pfn; max_zone_pfn[ZONE_NORMAL] = max_pfn; +} + +/* + * paging_init() sets up the memory map. + */ +void __init paging_init(void) +{ + unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, }; /* Initialize mem_map[]. */ + arch_zone_limits_init(max_zone_pfn); free_area_init(max_zone_pfn); /* Initialize the kernel's ZERO_PGE. */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 15076261d0c2..628c0e0ac313 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3552,6 +3552,7 @@ static inline unsigned long get_num_physpages(void) * free_area_init(max_zone_pfns); */ void free_area_init(unsigned long *max_zone_pfn); +void arch_zone_limits_init(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn); -- 2.51.0
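
Note (illustrative only, not part of this patch): a minimal sketch of how
generic MM code could consume the new hook once every architecture provides
one, so that free_area_init() is called from a single place in the core.
The caller's name and placement and the __weak fallback are assumptions made
for illustration; the eventual MM core change may look different.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Assumed fallback so architectures without the hook still link. */
void __weak arch_zone_limits_init(unsigned long *max_zone_pfn)
{
}

/* Hypothetical core-side caller; name and placement are assumptions. */
static void __init core_zone_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	/* Let the architecture fill in its per-zone PFN limits... */
	arch_zone_limits_init(max_zone_pfn);

	/* ...then initialize nodes and zones once, from generic code. */
	free_area_init(max_zone_pfn);
}

With such a caller in place, paging_init() on each architecture would only
need to provide arch_zone_limits_init() and any remaining arch-specific
setup, which is the direction this patch starts moving alpha toward.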
