Add the linear page mapping function for radix, used by memory hotplug.
This is similar to vmemmap_populate().

Signed-off-by: Reza Arbab <ar...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/radix.h |  4 ++++
 arch/powerpc/mm/pgtable-book3s64.c         |  2 +-
 arch/powerpc/mm/pgtable-radix.c            | 19 +++++++++++++++++++
 3 files changed, 24 insertions(+), 1 deletion(-)
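
[ Not part of the patch: a minimal, standalone sketch of the align-down
  and step-by-page loop that radix__create_section_mapping() uses below.
  map_one_page() and ALIGN_DOWN() are stand-ins for radix__map_kernel_page()
  and _ALIGN_DOWN(), and the 64K page size is just an example value; it
  compiles as ordinary userspace C. ]

#include <stdio.h>

#define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))

/* Stand-in for radix__map_kernel_page(): pretend to map one page. */
static int map_one_page(unsigned long va, unsigned long page_size)
{
	printf("map 0x%lx..0x%lx\n", va, va + page_size - 1);
	return 0;
}

int main(void)
{
	unsigned long page_size = 1UL << 16;	/* e.g. a 64K linear page size */
	unsigned long start = 0x10003000UL, end = 0x10030000UL;
	int rc;

	/*
	 * Align start down so the range begins on a linear-mapping page
	 * boundary, then map one page per iteration until end is reached.
	 */
	for (start = ALIGN_DOWN(start, page_size); start < end; start += page_size) {
		rc = map_one_page(start, page_size);
		if (rc)
			return rc;
	}
	return 0;
}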

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index b4d1302..43c2571 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -291,5 +291,9 @@ static inline unsigned long radix__get_tree_size(void)
        }
        return rts_field;
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int radix__create_section_mapping(unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMORY_HOTPLUG */
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 653ff6c..2b13f6b 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -131,7 +131,7 @@ void mmu_cleanup_all(void)
 int create_section_mapping(unsigned long start, unsigned long end)
 {
        if (radix_enabled())
-               return -ENODEV;
+               return radix__create_section_mapping(start, end);
 
        return hash__create_section_mapping(start, end);
 }
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 54bd70e..8201d1f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -465,6 +465,25 @@ void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
        memblock_set_current_limit(first_memblock_base + first_memblock_size);
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+int radix__create_section_mapping(unsigned long start, unsigned long end)
+{
+       unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+
+       /* Align to the page size of the linear mapping. */
+       start = _ALIGN_DOWN(start, page_size);
+
+       for (; start < end; start += page_size) {
+               int rc = radix__map_kernel_page(start, __pa(start),
+                                               PAGE_KERNEL, page_size);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit radix__vmemmap_create_mapping(unsigned long start,
                                      unsigned long page_size,
-- 
1.8.3.1
