From: "Mike Rapoport (IBM)" <r...@kernel.org>

Dynamic ftrace must allocate memory for code and this was impossible
without CONFIG_MODULES.

With jitalloc separated from the modules code, jit_text_alloc() is
available regardless of CONFIG_MODULES.

Move jitalloc initialization to x86/mm/init.c so that it won't get
compiled away when CONFIG_MODULES=n and enable dynamic ftrace
unconditionally.

Signed-off-by: Mike Rapoport (IBM) <r...@kernel.org>
---
 arch/x86/Kconfig         |  1 +
 arch/x86/kernel/ftrace.c |  9 --------
 arch/x86/kernel/module.c | 44 --------------------------------------
 arch/x86/mm/init.c       | 46 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 47 insertions(+), 53 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 53bab123a8ee..fac4add6ce16 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -35,6 +35,7 @@ config X86_64
        select SWIOTLB
        select ARCH_HAS_ELFCORE_COMPAT
        select ZONE_DMA32
+       select JIT_ALLOC if DYNAMIC_FTRACE
 
 config FORCE_DYNAMIC_FTRACE
        def_bool y
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 157c8a799704..aa99536b824c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -261,7 +261,6 @@ void arch_ftrace_update_code(int command)
 /* Currently only x86_64 supports dynamic trampolines */
 #ifdef CONFIG_X86_64
 
-#ifdef CONFIG_MODULES
 /* Module allocation simplifies allocating memory for code */
 static inline void *alloc_tramp(unsigned long size)
 {
@@ -271,14 +270,6 @@ static inline void tramp_free(void *tramp)
 {
        jit_free(tramp);
 }
-#else
-/* Trampolines can only be created if modules are supported */
-static inline void *alloc_tramp(unsigned long size)
-{
-       return NULL;
-}
-static inline void tramp_free(void *tramp) { }
-#endif
 
 /* Defined as markers to the end of the ftrace default trampolines */
 extern void ftrace_regs_caller_end(void);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index cacca613b8bd..94a00dc103cd 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -19,7 +19,6 @@
 #include <linux/jump_label.h>
 #include <linux/random.h>
 #include <linux/memory.h>
-#include <linux/jitalloc.h>
 
 #include <asm/text-patching.h>
 #include <asm/page.h>
@@ -37,49 +36,6 @@ do {                                                 \
 } while (0)
 #endif
 
-#ifdef CONFIG_RANDOMIZE_BASE
-static unsigned long module_load_offset;
-
-/* Mutex protects the module_load_offset. */
-static DEFINE_MUTEX(module_kaslr_mutex);
-
-static unsigned long int get_module_load_offset(void)
-{
-       if (kaslr_enabled()) {
-               mutex_lock(&module_kaslr_mutex);
-               /*
-                * Calculate the module_load_offset the first time this
-                * code is called. Once calculated it stays the same until
-                * reboot.
-                */
-               if (module_load_offset == 0)
-                       module_load_offset =
-                               get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
-               mutex_unlock(&module_kaslr_mutex);
-       }
-       return module_load_offset;
-}
-#else
-static unsigned long int get_module_load_offset(void)
-{
-       return 0;
-}
-#endif
-
-static struct jit_alloc_params jit_alloc_params = {
-       .alignment      = JIT_ALLOC_ALIGN,
-       .flags          = JIT_ALLOC_KASAN_SHADOW,
-};
-
-struct jit_alloc_params *jit_alloc_arch_params(void)
-{
-       jit_alloc_params.text.pgprot = PAGE_KERNEL;
-       jit_alloc_params.text.start = MODULES_VADDR + get_module_load_offset();
-       jit_alloc_params.text.end = MODULES_END;
-
-       return &jit_alloc_params;
-}
-
 #ifdef CONFIG_X86_32
 int apply_relocate(Elf32_Shdr *sechdrs,
                   const char *strtab,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 3cdac0f0055d..ffaf9a3840ce 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
 #include <linux/swapops.h>
 #include <linux/kmemleak.h>
 #include <linux/sched/task.h>
+#include <linux/jitalloc.h>
 
 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
@@ -1084,3 +1085,48 @@ unsigned long arch_max_swapfile_size(void)
        return pages;
 }
 #endif
+
+#ifdef CONFIG_JIT_ALLOC
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long jit_load_offset;
+
+/* Mutex protects the jit_load_offset. */
+static DEFINE_MUTEX(jit_kaslr_mutex);
+
+static unsigned long int get_jit_load_offset(void)
+{
+       if (kaslr_enabled()) {
+               mutex_lock(&jit_kaslr_mutex);
+               /*
+                * Calculate the jit_load_offset the first time this
+                * code is called. Once calculated it stays the same until
+                * reboot.
+                */
+               if (jit_load_offset == 0)
+                       jit_load_offset =
+                               get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+               mutex_unlock(&jit_kaslr_mutex);
+       }
+       return jit_load_offset;
+}
+#else
+static unsigned long int get_jit_load_offset(void)
+{
+       return 0;
+}
+#endif
+
+static struct jit_alloc_params jit_alloc_params = {
+       .alignment      = JIT_ALLOC_ALIGN,
+       .flags          = JIT_ALLOC_KASAN_SHADOW,
+};
+
+struct jit_alloc_params *jit_alloc_arch_params(void)
+{
+       jit_alloc_params.text.pgprot = PAGE_KERNEL;
+       jit_alloc_params.text.start = MODULES_VADDR + get_jit_load_offset();
+       jit_alloc_params.text.end = MODULES_END;
+
+       return &jit_alloc_params;
+}
+#endif /* CONFIG_JIT_ALLOC */
-- 
2.35.1

Reply via email to