If MODULES_{VADDR,END} are not defined, set them to VMALLOC_START and VMALLOC_END respectively. This reduces the need for special cases. For example, powerpc's module_alloc() was previously compiled only when MODULES_VADDR was defined, but is now defined unconditionally.
This will be useful reducing conditional code in other places that need to allocate from the module region (i.e., kprobes). Signed-off-by: Jordan Niethe <jniet...@gmail.com> --- v10: New to series v11: - Consider more places MODULES_VADDR was being used --- arch/powerpc/include/asm/pgtable.h | 11 +++++++++++ arch/powerpc/kernel/module.c | 5 +---- arch/powerpc/mm/kasan/kasan_init_32.c | 10 +++++----- arch/powerpc/mm/ptdump/ptdump.c | 4 ++-- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index c6a676714f04..882fda779648 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -39,6 +39,17 @@ struct mm_struct; #define __S110 PAGE_SHARED_X #define __S111 PAGE_SHARED_X +#ifndef MODULES_VADDR +#define MODULES_VADDR VMALLOC_START +#define MODULES_END VMALLOC_END +#endif + +#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_STRICT_KERNEL_RWX) +#if TASK_SIZE > MODULES_VADDR +#error TASK_SIZE > MODULES_VADDR +#endif +#endif + #ifndef __ASSEMBLY__ /* Keep these as a macros to avoid include dependency mess */ diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index fab84024650c..c60c7457ff47 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -15,6 +15,7 @@ #include <linux/sort.h> #include <asm/setup.h> #include <asm/sections.h> +#include <linux/mm.h> static LIST_HEAD(module_bug_list); @@ -88,7 +89,6 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } -#ifdef MODULES_VADDR static __always_inline void * __module_alloc(unsigned long size, unsigned long start, unsigned long end) { @@ -102,8 +102,6 @@ void *module_alloc(unsigned long size) unsigned long limit = (unsigned long)_etext - SZ_32M; void *ptr = NULL; - BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); - /* First try within 32M limit from _etext to avoid branch trampolines */ if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) ptr = 
__module_alloc(size, limit, MODULES_END); @@ -113,4 +111,3 @@ void *module_alloc(unsigned long size) return ptr; } -#endif diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index cf8770b1a692..42c057366ac7 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -116,11 +116,11 @@ static void __init kasan_unmap_early_shadow_vmalloc(void) kasan_update_early_region(k_start, k_end, __pte(0)); -#ifdef MODULES_VADDR - k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR); - k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END); - kasan_update_early_region(k_start, k_end, __pte(0)); -#endif + if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { + k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR); + k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END); + kasan_update_early_region(k_start, k_end, __pte(0)); + } } void __init kasan_mmu_init(void) diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index aca354fb670b..0431457f668f 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -73,7 +73,7 @@ struct addr_marker { static struct addr_marker address_markers[] = { { 0, "Start of kernel VM" }, -#ifdef MODULES_VADDR +#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_STRICT_KERNEL_RWX) { 0, "modules start" }, { 0, "modules end" }, #endif @@ -359,7 +359,7 @@ static void populate_markers(void) #else address_markers[i++].start_address = TASK_SIZE; #endif -#ifdef MODULES_VADDR +#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_STRICT_KERNEL_RWX) address_markers[i++].start_address = MODULES_VADDR; address_markers[i++].start_address = MODULES_END; #endif -- 2.25.1