Annotate vital static information for kmemdump:
 - mem_section

Information about these variables is stored in a dedicated kmemdump section.

Register dynamic information with kmemdump:
 - section
 - mem_section_usage

This information is allocated per node, so call kmemdump_alloc_size(),
which allocates a unique kmemdump uid and registers the address of the
allocation (for mem_section itself, kmemdump_alloc_id_size() is used so
that it is registered under the fixed KMEMDUMP_ID_COREIMAGE_MEMSECT id).

Signed-off-by: Eugen Hristev <[email protected]>
---
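Note for reviewers: the snippet below is not part of the patch; it is only
an illustrative sketch of how the kmemdump helpers used here are assumed to
behave. KMEMDUMP_VAR_CORE() is assumed to record the address and size of a
static variable in a dedicated kmemdump section, while the alloc wrappers
are assumed to forward to the given allocator and then register the
returned region. kmemdump_register() and kmemdump_get_uid() below are
hypothetical names, not taken from <linux/kmemdump.h>.

  /* Sketch only -- hypothetical helpers assumed to exist in the kmemdump core. */
  int kmemdump_get_uid(void);
  void kmemdump_register(int id, void *addr, unsigned long size);

  /* Allocate via alloc_fn(...) and register the result under a fresh uid. */
  #define kmemdump_alloc_size(size, alloc_fn, ...)                      \
  ({                                                                    \
          void *__ptr = alloc_fn(__VA_ARGS__);                          \
          if (__ptr)                                                    \
                  kmemdump_register(kmemdump_get_uid(), __ptr, size);   \
          __ptr;                                                        \
  })

  /* Same, but register under a fixed id (e.g. KMEMDUMP_ID_COREIMAGE_MEMSECT). */
  #define kmemdump_alloc_id_size(id, size, alloc_fn, ...)               \
  ({                                                                    \
          void *__ptr = alloc_fn(__VA_ARGS__);                          \
          if (__ptr)                                                    \
                  kmemdump_register(id, __ptr, size);                   \
          __ptr;                                                        \
  })
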
 mm/sparse.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
index 3c012cf83cc2..04b1b679a2ad 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -15,6 +15,7 @@
 #include <linux/swapops.h>
 #include <linux/bootmem_info.h>
 #include <linux/vmstat.h>
+#include <linux/kmemdump.h>
 #include "internal.h"
 #include <asm/dma.h>
 
@@ -30,6 +31,7 @@ struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
 #endif
 EXPORT_SYMBOL(mem_section);
+KMEMDUMP_VAR_CORE(mem_section, sizeof(mem_section));
 
 #ifdef NODE_NOT_IN_PAGE_FLAGS
 /*
@@ -67,10 +69,11 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
                                   sizeof(struct mem_section);
 
        if (slab_is_available()) {
-               section = kzalloc_node(array_size, GFP_KERNEL, nid);
+               section = kmemdump_alloc_size(array_size, kzalloc_node,
+                                             array_size, GFP_KERNEL, nid);
        } else {
-               section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
-                                             nid);
+               section = kmemdump_alloc_size(array_size, memblock_alloc_node,
+                                             array_size, SMP_CACHE_BYTES, nid);
                if (!section)
                        panic("%s: Failed to allocate %lu bytes nid=%d\n",
                              __func__, array_size, nid);
@@ -252,7 +255,9 @@ static void __init memblocks_present(void)
 
                size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
-               mem_section = memblock_alloc_or_panic(size, align);
+               mem_section = kmemdump_alloc_id_size(KMEMDUMP_ID_COREIMAGE_MEMSECT,
+                                                    size, memblock_alloc_or_panic,
+                                                    size, align);
        }
 #endif
 
@@ -338,7 +343,8 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-       usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
+       usage = kmemdump_alloc_size(size, memblock_alloc_try_nid, size,
+                                   SMP_CACHE_BYTES, goal, limit, nid);
        if (!usage && limit) {
                limit = MEMBLOCK_ALLOC_ACCESSIBLE;
                goto again;
-- 
2.43.0

