If we fail to allocate the vmemmap list, we don't keep track of the
allocated vmemmap block buf. Hence on section deactivate we skip the
vmemmap block buf free. This results in a memory leak.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/mm/init_64.c | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 152aa0200cef..8a198504c0e1 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -162,16 +162,16 @@ static __meminit struct vmemmap_backing * 
vmemmap_list_alloc(int node)
        return next++;
 }
 
-static __meminit void vmemmap_list_populate(unsigned long phys,
-                                           unsigned long start,
-                                           int node)
+static __meminit int vmemmap_list_populate(unsigned long phys,
+                                          unsigned long start,
+                                          int node)
 {
        struct vmemmap_backing *vmem_back;
 
        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
-               WARN_ON(1);
-               return;
+               pr_debug("vmemmap list allocation failed\n");
+               return -ENOMEM;
        }
 
        vmem_back->phys = phys;
@@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long 
phys,
        vmem_back->list = vmemmap_list;
 
        vmemmap_list = vmem_back;
+       return 0;
 }
 
 static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long 
start,
@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap 
*altmap, unsigned long star
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int 
node,
                struct vmem_altmap *altmap)
 {
+       bool altmap_alloc;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
        /* Align to the page size of the linear mapping. */
@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, 
unsigned long end, int node,
                        p = altmap_alloc_block_buf(page_size, altmap);
                        if (!p)
                                pr_debug("altmap block allocation failed, 
falling back to system memory");
+                       else
+                               altmap_alloc = true;
                }
-               if (!p)
+               if (!p) {
                        p = vmemmap_alloc_block_buf(page_size, node);
+                       altmap_alloc = false;
+               }
                if (!p)
                        return -ENOMEM;
 
-               vmemmap_list_populate(__pa(p), start, node);
+               if (vmemmap_list_populate(__pa(p), start, node)) {
+                       /*
+                        * If we don't populate vmemmap list, we don't have
+                        * the ability to free the allocated vmemmap
+                        * pages in section_deactivate. Hence free them
+                        * here.
+                        */
+                       int nr_pfns = page_size >> PAGE_SHIFT;
+                       unsigned long page_order = get_order(page_size);
+
+                       if (altmap_alloc)
+                               vmem_altmap_free(altmap, nr_pfns);
+                       else
+                               free_pages((unsigned long)p, page_order);
+                       return -ENOMEM;
+               }
 
                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);
-- 
2.26.2

Reply via email to