From: "Mike Rapoport (Microsoft)" <[email protected]>

On architectures that keep memblock after boot, freeing of reserved memory with free_reserved_area() is paired with an update of memblock arrays, usually by a call to memblock_free().
Make free_reserved_area() directly update memblock.reserved when ARCH_KEEP_MEMBLOCK is enabled. Remove the now-redundant explicit memblock_free() call from arm64::free_initmem() and the #ifdef CONFIG_ARCH_KEEP_MEMBLOCK block from the generic free_initrd_mem(). Signed-off-by: Mike Rapoport (Microsoft) <[email protected]> --- arch/arm64/mm/init.c | 3 --- init/initramfs.c | 7 ------- mm/memblock.c | 6 ++++++ 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 96711b8578fd..07b17c708702 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -385,9 +385,6 @@ void free_initmem(void) WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE)); WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE)); - /* Delete __init region from memblock.reserved. */ - memblock_free(lm_init_begin, lm_init_end - lm_init_begin); - free_reserved_area(lm_init_begin, lm_init_end, POISON_FREE_INITMEM, "unused kernel"); /* diff --git a/init/initramfs.c b/init/initramfs.c index 139baed06589..bca0922b2850 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -652,13 +652,6 @@ void __init reserve_initrd_mem(void) void __weak __init free_initrd_mem(unsigned long start, unsigned long end) { -#ifdef CONFIG_ARCH_KEEP_MEMBLOCK - unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE); - unsigned long aligned_end = ALIGN(end, PAGE_SIZE); - - memblock_free((void *)aligned_start, aligned_end - aligned_start); -#endif - free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, "initrd"); } diff --git a/mm/memblock.c b/mm/memblock.c index 87bd200a8cc9..9f372a8e82f7 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -942,6 +942,12 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char end_pa = __pa(end - 1) + 1; } + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { + if (start_pa < end_pa) + memblock_remove_range(&memblock.reserved, + start_pa, end_pa - start_pa); + } + pages = 
__free_reserved_area(start_pa, end_pa, poison); if (pages && s) pr_info("Freeing %s memory: %ldK\n", s, K(pages)); -- 2.51.0
