From: "Mike Rapoport (Microsoft)" <[email protected]>

There are two functions that release pages to the buddy allocator late in
the boot: free_reserved_area() and memblock_free_late().

Currently they use different underlying functionality:
free_reserved_area() runs each page being freed via free_reserved_page()
and memblock_free_late() uses memblock_free_pages() -> __free_pages_core(),
but in the end they both boil down to a loop that frees a range page by
page.

Extract the loop that frees pages from free_reserved_area() into a helper
and use that helper in memblock_free_late().

Signed-off-by: Mike Rapoport (Microsoft) <[email protected]>
---
 mm/memblock.c | 55 +++++++++++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 26 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index eb086724802a..ccdf3d225626 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -893,26 +893,12 @@ int __init_memblock memblock_remove(phys_addr_t base, 
phys_addr_t size)
        return memblock_remove_range(&memblock.memory, base, size);
 }
 
-unsigned long free_reserved_area(void *start, void *end, int poison, const 
char *s)
+static unsigned long __free_reserved_area(phys_addr_t start, phys_addr_t end,
+                                         int poison)
 {
-       phys_addr_t start_pa, end_pa;
        unsigned long pages = 0, pfn;
 
-       /*
-        * end is the first address past the region and it may be beyond what
-        * __pa() or __pa_symbol() can handle.
-        * Use the address included in the range for the conversion and add
-        * back 1 afterwards.
-        */
-       if (__is_kernel((unsigned long)start)) {
-               start_pa = __pa_symbol(start);
-               end_pa = __pa_symbol(end - 1) + 1;
-       } else {
-               start_pa = __pa(start);
-               end_pa = __pa(end - 1) + 1;
-       }
-
-       for_each_valid_pfn(pfn, PFN_UP(start_pa), PFN_DOWN(end_pa)) {
+       for_each_valid_pfn(pfn, PFN_UP(start), PFN_DOWN(end)) {
                struct page *page = pfn_to_page(pfn);
                void *direct_map_addr;
 
@@ -934,7 +920,29 @@ unsigned long free_reserved_area(void *start, void *end, 
int poison, const char
                free_reserved_page(page);
                pages++;
        }
+       return pages;
+}
+
+unsigned long free_reserved_area(void *start, void *end, int poison, const 
char *s)
+{
+       phys_addr_t start_pa, end_pa;
+       unsigned long pages;
+
+       /*
+        * end is the first address past the region and it may be beyond what
+        * __pa() or __pa_symbol() can handle.
+        * Use the address included in the range for the conversion and add back
+        * 1 afterwards.
+        */
+       if (__is_kernel((unsigned long)start)) {
+               start_pa = __pa_symbol(start);
+               end_pa = __pa_symbol(end - 1) + 1;
+       } else {
+               start_pa = __pa(start);
+               end_pa = __pa(end - 1) + 1;
+       }
 
+       pages = __free_reserved_area(start_pa, end_pa, poison);
        if (pages && s)
                pr_info("Freeing %s memory: %ldK\n", s, K(pages));
 
@@ -1810,20 +1818,15 @@ void *__init __memblock_alloc_or_panic(phys_addr_t 
size, phys_addr_t align,
  */
 void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
 {
-       phys_addr_t cursor, end;
+       phys_addr_t end = base + size - 1;
 
-       end = base + size - 1;
        memblock_dbg("%s: [%pa-%pa] %pS\n",
                     __func__, &base, &end, (void *)_RET_IP_);
-       kmemleak_free_part_phys(base, size);
-       cursor = PFN_UP(base);
-       end = PFN_DOWN(base + size);
 
-       for (; cursor < end; cursor++) {
-               memblock_free_pages(cursor, 0);
-               totalram_pages_inc();
-       }
+       kmemleak_free_part_phys(base, size);
+       __free_reserved_area(base, base + size, -1);
 }
+
 /*
  * Remaining API functions
  */
-- 
2.53.0


Reply via email to