Use __vmalloc_node_range() to simplify x86's alloc_insn_page()
implementation: the page can be mapped read-only and executable
(PAGE_KERNEL_ROX) and tagged VM_FLUSH_RESET_PERMS directly at
allocation time, so the separate set_vm_flush_reset_perms(),
set_memory_ro() and set_memory_x() calls are no longer needed.

Signed-off-by: Jisheng Zhang <jisheng.zh...@synaptics.com>
---
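For reviewers: below is a minimal annotated sketch of how the new call
maps onto __vmalloc_node_range()'s parameters. The helper name
alloc_rox_page() is only illustrative; the actual change keeps the
alloc_insn_page() name, exactly as in the hunk further down.

#include <linux/mm.h>
#include <linux/numa.h>		/* NUMA_NO_NODE */
#include <linux/vmalloc.h>	/* __vmalloc_node_range() */

/* Illustrative only: allocate one page mapped read-only + executable. */
static void *alloc_rox_page(void)
{
	return __vmalloc_node_range(PAGE_SIZE,	/* size: one page */
			PAGE_SIZE,		/* align: page aligned */
			VMALLOC_START,		/* map within the vmalloc range... */
			VMALLOC_END,		/* ...rather than the module area */
			GFP_KERNEL,		/* normal kernel allocation */
			PAGE_KERNEL_ROX,	/* RO + executable from the start */
			VM_FLUSH_RESET_PERMS,	/* reset direct-map perms on vfree() */
			NUMA_NO_NODE,		/* no NUMA node preference */
			__builtin_return_address(0)); /* caller for /proc/vmallocinfo */
}

One visible difference from module_alloc() is the address range: as in
the hunk, the page is mapped in the generic VMALLOC_START..VMALLOC_END
area rather than in the module mapping space.
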
 arch/x86/kernel/kprobes/core.c | 24 ++++--------------------
 1 file changed, 4 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index df776cdca327..75081f3dbe44 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -383,26 +383,10 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
 /* Make page to RO mode when allocate it */
 void *alloc_insn_page(void)
 {
-       void *page;
-
-       page = module_alloc(PAGE_SIZE);
-       if (!page)
-               return NULL;
-
-       set_vm_flush_reset_perms(page);
-       /*
-        * First make the page read-only, and only then make it executable to
-        * prevent it from being W+X in between.
-        */
-       set_memory_ro((unsigned long)page, 1);
-
-       /*
-        * TODO: Once additional kernel code protection mechanisms are set, ensure
-        * that the page was not maliciously altered and it is still zeroed.
-        */
-       set_memory_x((unsigned long)page, 1);
-
-       return page;
+       return __vmalloc_node_range(PAGE_SIZE, PAGE_SIZE, VMALLOC_START,
+                       VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
+                       VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+                       __builtin_return_address(0));
 }
 
 /* Recover page to RW mode before releasing it */
-- 
2.31.0
