From: "Mike Rapoport (Microsoft)" <[email protected]>

free_reserved_area() is related to memblock as it frees reserved memory
back to the buddy allocator, similar to what memblock_free_late() does.

Move free_reserved_area() to mm/memblock.c to prepare for further
consolidation of the functions that free reserved memory.

No functional changes.

Signed-off-by: Mike Rapoport (Microsoft) <[email protected]>
---
 mm/memblock.c                     | 37 ++++++++++++++++++++++++++++++-
 mm/page_alloc.c                   | 36 ------------------------------
 tools/include/linux/mm.h          |  1 +
 tools/testing/memblock/internal.h | 34 +++++++++++++++++++++++++---
 4 files changed, 68 insertions(+), 40 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index d4a02f1750e9..c0896efbee97 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -893,6 +893,42 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
        return memblock_remove_range(&memblock.memory, base, size);
 }
 
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
+{
+       void *pos;
+       unsigned long pages = 0;
+
+       start = (void *)PAGE_ALIGN((unsigned long)start);
+       end = (void *)((unsigned long)end & PAGE_MASK);
+       for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+               struct page *page = virt_to_page(pos);
+               void *direct_map_addr;
+
+               /*
+                * 'direct_map_addr' might be different from 'pos'
+                * because some architectures' virt_to_page()
+                * work with aliases.  Getting the direct map
+                * address ensures that we get a _writeable_
+                * alias for the memset().
+                */
+               direct_map_addr = page_address(page);
+               /*
+                * Perform a kasan-unchecked memset() since this memory
+                * has not been initialized.
+                */
+               direct_map_addr = kasan_reset_tag(direct_map_addr);
+               if ((unsigned int)poison <= 0xFF)
+                       memset(direct_map_addr, poison, PAGE_SIZE);
+
+               free_reserved_page(page);
+       }
+
+       if (pages && s)
+               pr_info("Freeing %s memory: %ldK\n", s, K(pages));
+
+       return pages;
+}
+
 /**
  * memblock_free - free boot memory allocation
  * @ptr: starting address of the  boot memory allocation
@@ -1776,7 +1812,6 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
                totalram_pages_inc();
        }
 }
-
 /*
  * Remaining API functions
  */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d4b6f1a554e..df3d61253001 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6234,42 +6234,6 @@ void adjust_managed_page_count(struct page *page, long count)
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
 
-unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
-{
-       void *pos;
-       unsigned long pages = 0;
-
-       start = (void *)PAGE_ALIGN((unsigned long)start);
-       end = (void *)((unsigned long)end & PAGE_MASK);
-       for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
-               struct page *page = virt_to_page(pos);
-               void *direct_map_addr;
-
-               /*
-                * 'direct_map_addr' might be different from 'pos'
-                * because some architectures' virt_to_page()
-                * work with aliases.  Getting the direct map
-                * address ensures that we get a _writeable_
-                * alias for the memset().
-                */
-               direct_map_addr = page_address(page);
-               /*
-                * Perform a kasan-unchecked memset() since this memory
-                * has not been initialized.
-                */
-               direct_map_addr = kasan_reset_tag(direct_map_addr);
-               if ((unsigned int)poison <= 0xFF)
-                       memset(direct_map_addr, poison, PAGE_SIZE);
-
-               free_reserved_page(page);
-       }
-
-       if (pages && s)
-               pr_info("Freeing %s memory: %ldK\n", s, K(pages));
-
-       return pages;
-}
-
 void free_reserved_page(struct page *page)
 {
        clear_page_tag_ref(page);
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index 028f3faf46e7..4407d8396108 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -17,6 +17,7 @@
 
 #define __va(x) ((void *)((unsigned long)(x)))
 #define __pa(x) ((unsigned long)(x))
+#define __pa_symbol(x) ((unsigned long)(x))
 
 #define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
 
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 009b97bbdd22..b72be2968104 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -11,9 +11,22 @@ static int memblock_debug = 1;
 
 #define pr_warn_ratelimited(fmt, ...)    printf(fmt, ##__VA_ARGS__)
 
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
 bool mirrored_kernelcore = false;
 
 struct page {};
+static inline void *page_address(struct page *page)
+{
+       BUG();
+       return page;
+}
+
+static inline struct page *virt_to_page(void *virt)
+{
+       BUG();
+       return virt;
+}
 
 void memblock_free_pages(unsigned long pfn, unsigned int order)
 {
@@ -23,10 +36,25 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
 {
 }
 
-static inline unsigned long free_reserved_area(void *start, void *end,
-                                              int poison, const char *s)
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
+void free_reserved_page(struct page *page);
+
+static inline bool deferred_pages_enabled(void)
+{
+       return false;
+}
+
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn)                     \
+       for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void *kasan_reset_tag(const void *addr)
+{
+       return (void *)addr;
+}
+
+static inline bool __is_kernel(unsigned long addr)
 {
-       return 0;
+       return false;
 }
 
 #endif
-- 
2.53.0


Reply via email to