Prepare for hot{plug,remove} of sub-ranges of a section by tracking a
section active bitmask, each bit representing 2MB (SECTION_SIZE (128M) /
map_active bitmask length (64)).
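
As a rough illustration of that arithmetic (a minimal standalone sketch, not
the kernel helpers added below; SUB_SECTION_SIZE and sub_section_bit() are
made-up names here, and the 128M/64-bit figures are the ones quoted above):

	#include <stdio.h>

	#define SECTION_SIZE		(128UL << 20)	/* 128M per section */
	#define MAP_ACTIVE_BITS		64UL		/* bits in map_active */
	#define SUB_SECTION_SIZE	(SECTION_SIZE / MAP_ACTIVE_BITS) /* 2M per bit */

	/* Bit index in the per-section bitmask for a physical address. */
	static unsigned long sub_section_bit(unsigned long phys)
	{
		return (phys % SECTION_SIZE) / SUB_SECTION_SIZE;
	}

	int main(void)
	{
		/* An offset of 6M into a section lands in bit 3. */
		printf("%lu\n", sub_section_bit(6UL << 20));
		return 0;
	}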

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Mel Gorman <mgor...@techsingularity.net>
Cc: Vlastimil Babka <vba...@suse.cz>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Logan Gunthorpe <log...@deltatee.com>
Cc: Stephen Bates <stephen.ba...@microsemi.com>
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---
 include/linux/mmzone.h |    3 +++
 mm/page_alloc.c        |    4 +++-
 mm/sparse.c            |   53 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5a0117a72ec4..e282dc328ada 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1083,6 +1083,8 @@ struct mem_section_usage {
        unsigned long pageblock_flags[0];
 };
 
+void section_active_init(unsigned long pfn, unsigned long nr_pages);
+
 struct page;
 struct page_ext;
 struct mem_section {
@@ -1224,6 +1226,7 @@ void sparse_init(void);
 #else
 #define sparse_init()  do {} while (0)
 #define sparse_index_init(_sec, _nid)  do {} while (0)
+#define section_active_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8a509e382f55..8dbfb131e358 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6308,10 +6308,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
        /* Print out the early node map */
        pr_info("Early memory node ranges\n");
-       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
                        (u64)start_pfn << PAGE_SHIFT,
                        ((u64)end_pfn << PAGE_SHIFT) - 1);
+               section_active_init(start_pfn, end_pfn - start_pfn);
+       }
 
        /* Initialise every node */
        mminit_verify_pageflags_layout();
diff --git a/mm/sparse.c b/mm/sparse.c
index 59966a3e8ff0..00fdb5d04680 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -168,6 +168,59 @@ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
        }
 }
 
+static int section_active_index(phys_addr_t phys)
+{
+       return (phys & ~(PA_SECTION_MASK)) / SECTION_ACTIVE_SIZE;
+}
+
+static unsigned long section_active_mask(unsigned long pfn,
+               unsigned long nr_pages)
+{
+       int idx_start, idx_size;
+       phys_addr_t start, size;
+
+       if (!nr_pages)
+               return 0;
+
+       start = PFN_PHYS(pfn);
+       size = PFN_PHYS(min(nr_pages, PAGES_PER_SECTION
+                               - (pfn & ~PAGE_SECTION_MASK)));
+       size = ALIGN(size, SECTION_ACTIVE_SIZE);
+
+       idx_start = section_active_index(start);
+       idx_size = section_active_index(size);
+
+       if (idx_size == 0)
+               return -1;
+       return ((1UL << idx_size) - 1) << idx_start;
+}
+
+void section_active_init(unsigned long pfn, unsigned long nr_pages)
+{
+       int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+       int i, start_sec = pfn_to_section_nr(pfn);
+
+       if (!nr_pages)
+               return;
+
+       for (i = start_sec; i <= end_sec; i++) {
+               struct mem_section *ms;
+               unsigned long mask;
+               unsigned long pfns;
+
+               pfns = min(nr_pages, PAGES_PER_SECTION
+                               - (pfn & ~PAGE_SECTION_MASK));
+               mask = section_active_mask(pfn, pfns);
+
+               ms = __nr_to_section(i);
+               pr_debug("%s: sec: %d mask: %#018lx\n", __func__, i, mask);
+               ms->usage->map_active = mask;
+
+               pfn += pfns;
+               nr_pages -= pfns;
+       }
+}
+
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
