From: Joerg Roedel <[email protected]>

Remove the __init annotations from pti_init() and everything it calls
on x86. The pti_init() function sets up the kernel mappings that are
visible in the user-space page-table when PTI is enabled, which only
makes sense after the relevant kernel mappings have been finalized.

The kernel mappings are finalized once the read-only and no-execute
protections have been established for the kernel text, rodata and data
sections, which in turn happens after the init code and data have been
freed by the kernel.

So to be able to call pti_init() at the right place, it can't be
annotated __init anymore.

Signed-off-by: Joerg Roedel <[email protected]>
---
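For reference, the boot-time ordering this change enables looks roughly
like the sketch below, modeled on kernel_init() in init/main.c. The
pti_init() call site shown here is only illustrative; wiring up the
actual caller is a separate change:

static int __ref kernel_init(void *unused)
{
	kernel_init_freeable();
	/* All async __init code must finish before init memory is freed. */
	async_synchronize_full();
	free_initmem();		/* .init.text/.init.data are gone after this */
	mark_readonly();	/* RO/NX protections on text/rodata/data set */

	/*
	 * Only from this point on are the kernel mappings final, so only
	 * now can the user-space page-table be populated. This is why
	 * pti_init() and everything it calls can no longer be __init.
	 */
	pti_init();		/* illustrative call site only */

	/* ... continues on to run_init_process() as usual ... */
	return 0;
}
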
 arch/x86/entry/vsyscall/vsyscall_64.c |  2 +-
 arch/x86/mm/pti.c                     | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 82ed001..6cd5cbf 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -340,7 +340,7 @@ int in_gate_area_no_mm(unsigned long addr)
  * vsyscalls but leave the page not present.  If so, we skip calling
  * this.
  */
-void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
+void set_vsyscall_pgtable_user_bits(pgd_t *root)
 {
        pgd_t *pgd;
        p4d_t *p4d;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4d418e7..8a80522 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -234,7 +234,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 {
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
@@ -262,7 +262,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
        return pte;
 }
 
-static void __init pti_setup_vsyscall(void)
+static void pti_setup_vsyscall(void)
 {
        pte_t *pte, *target_pte;
        unsigned int level;
@@ -279,7 +279,7 @@ static void __init pti_setup_vsyscall(void)
        set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
 }
 #else
-static void __init pti_setup_vsyscall(void) { }
+static void pti_setup_vsyscall(void) { }
 #endif
 
 static void
@@ -348,7 +348,7 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
  * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
  * next-level entry on 5-level systems).
  */
-static void __init pti_clone_p4d(unsigned long addr)
+static void pti_clone_p4d(unsigned long addr)
 {
        p4d_t *kernel_p4d, *user_p4d;
        pgd_t *kernel_pgd;
@@ -362,7 +362,7 @@ static void __init pti_clone_p4d(unsigned long addr)
 /*
  * Clone the CPU_ENTRY_AREA into the user space visible page table.
  */
-static void __init pti_clone_user_shared(void)
+static void pti_clone_user_shared(void)
 {
        pti_clone_p4d(CPU_ENTRY_AREA_BASE);
 }
@@ -370,7 +370,7 @@ static void __init pti_clone_user_shared(void)
 /*
  * Clone the ESPFIX P4D into the user space visible page table
  */
-static void __init pti_setup_espfix64(void)
+static void pti_setup_espfix64(void)
 {
 #ifdef CONFIG_X86_ESPFIX64
        pti_clone_p4d(ESPFIX_BASE_ADDR);
@@ -380,7 +380,7 @@ static void __init pti_setup_espfix64(void)
 /*
  * Clone the populated PMDs of the entry and irqentry text and force it RO.
  */
-static void __init pti_clone_entry_text(void)
+static void pti_clone_entry_text(void)
 {
        pti_clone_pmds((unsigned long) __entry_text_start,
                        (unsigned long) __irqentry_text_end,
@@ -486,7 +486,7 @@ void pti_set_kernel_image_nonglobal(void)
 /*
  * Initialize kernel page table isolation
  */
-void __init pti_init(void)
+void pti_init(void)
 {
        if (!static_cpu_has(X86_FEATURE_PTI))
                return;
-- 
2.7.4
