set_memory_p() and set_memory_np() can fail.

As mentioned in linux/mm.h:

/*
 * To support DEBUG_PAGEALLOC architecture must ensure that
 * __kernel_map_pages() never fails
 */

So panic in case set_memory_p() or set_memory_np() fail
in __kernel_map_pages().

Link: https://github.com/KSPP/linux/issues/7
Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/include/asm/book3s/64/hash.h |  2 +-
 arch/powerpc/mm/book3s64/hash_utils.c     |  3 ++-
 arch/powerpc/mm/pageattr.c                | 10 +++++++---
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 6e70ae511631..8f47ae79f2a6 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -269,7 +269,7 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
                                 int nid, pgprot_t prot);
 int hash__remove_section_mapping(unsigned long start, unsigned long end);
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+int hash__kernel_map_pages(struct page *page, int numpages, int enable);
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0626a25b0d72..01c3b4b65241 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2172,7 +2172,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
                                     mmu_kernel_ssize, 0);
 }
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable)
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
        unsigned long flags, vaddr, lmi;
        int i;
@@ -2189,6 +2189,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
+       return 0;
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 16b8d20d6ca8..62b678585878 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -106,17 +106,21 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
 #ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+       int err;
        unsigned long addr = (unsigned long)page_address(page);
 
        if (PageHighMem(page))
                return;
 
        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
-               hash__kernel_map_pages(page, numpages, enable);
+               err = hash__kernel_map_pages(page, numpages, enable);
        else if (enable)
-               err = set_memory_p(addr, numpages);
+               err = set_memory_p(addr, numpages);
        else
-               set_memory_np(addr, numpages);
+               err = set_memory_np(addr, numpages);
+
+       if (err)
+               panic("%s: set_memory_%sp() failed\n", __func__, enable ? "" : "n");
 }
 #endif
 #endif
-- 
2.43.0

Reply via email to