GLOBAL had several meanings and is going away. In this patch, convert
all the inner function labels marked with GLOBAL to use
SYM_FUNC_INNER_LABEL instead.
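
For reference, the conversion assumes a macro of roughly this shape (a
minimal sketch only; the authoritative definitions are the linkage.h
changes earlier in this series):

	/* Visibility helpers (assumed): emit a directive, or nothing */
	#define SYM_V_GLOBAL(name)	.globl name
	#define SYM_V_LOCAL(name)	/* file-local: no directive needed */

	/* Annotate a label inside a function without opening a new one */
	#define SYM_FUNC_INNER_LABEL(name, visibility)	\
		visibility(name) ASM_NL			\
		name: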

Note that retint_user need not be global, perhaps since commit
2ec67971facc ("x86/entry/64/compat: Remove most of the fast system call
machinery"), which removed its caller in entry_64_compat. So mark the
label as LOCAL.
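
Under the assumed sketch above, the two annotations would expand
roughly as follows; the LOCAL conversion only drops the .globl
directive:

	/* SYM_V_GLOBAL: label visible outside this object file */
	.globl restore_regs_and_iret
	restore_regs_and_iret:

	/* SYM_V_LOCAL: plain file-local label */
	retint_user: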

Signed-off-by: Jiri Slaby <jsl...@suse.cz>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: x...@kernel.org
Cc: Andy Lutomirski <l...@amacapital.net>
---
 arch/x86/entry/entry_64.S                |  6 +++---
 arch/x86/entry/entry_64_compat.S         |  2 +-
 arch/x86/entry/vdso/vdso32/system_call.S |  2 +-
 arch/x86/kernel/ftrace_32.S              |  2 +-
 arch/x86/kernel/ftrace_64.S              | 16 ++++++++--------
 arch/x86/realmode/rm/reboot.S            |  2 +-
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 086f0cb600ff..213127a44c7c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -146,7 +146,7 @@ ENTRY(entry_SYSCALL_64)
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
-GLOBAL(entry_SYSCALL_64_after_swapgs)
+SYM_FUNC_INNER_LABEL(entry_SYSCALL_64_after_swapgs, SYM_V_GLOBAL)
 
        movq    %rsp, PER_CPU_VAR(rsp_scratch)
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -526,7 +526,7 @@ ret_from_intr:
        jz      retint_kernel
 
        /* Interrupt came from user space */
-GLOBAL(retint_user)
+SYM_FUNC_INNER_LABEL(retint_user, SYM_V_LOCAL)
        mov     %rsp,%rdi
        call    prepare_exit_to_usermode
        TRACE_IRQS_IRETQ
@@ -555,7 +555,7 @@ retint_kernel:
  * At this label, code paths which return to kernel and to user,
  * which come from interrupts/exception and from syscalls, merge.
  */
-GLOBAL(restore_regs_and_iret)
+SYM_FUNC_INNER_LABEL(restore_regs_and_iret, SYM_V_GLOBAL)
        RESTORE_EXTRA_REGS
 restore_c_regs_and_iret:
        RESTORE_C_REGS
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ba6af2fba6a7..b7934ef3f5bb 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -131,7 +131,7 @@ ENTRY(entry_SYSENTER_compat)
        pushq   $X86_EFLAGS_FIXED
        popfq
        jmp     .Lsysenter_flags_fixed
-GLOBAL(__end_entry_SYSENTER_compat)
+SYM_FUNC_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_V_GLOBAL)
 ENDPROC(entry_SYSENTER_compat)
 
 /*
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index ed4bc9731cbb..ce5b1dd32337 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -61,7 +61,7 @@ __kernel_vsyscall:
 
        /* Enter using int $0x80 */
        int     $0x80
-GLOBAL(int80_landing_pad)
+SYM_FUNC_INNER_LABEL(int80_landing_pad, SYM_V_GLOBAL)
 
        /*
         * Restore EDX and ECX in case they were clobbered.  EBP is not
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 89f8324e9a68..8ad07ddfa1c9 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -139,7 +139,7 @@ ENTRY(ftrace_regs_caller)
        movl    function_trace_op, %ecx         /* Save ftrace_ops in 3rd parameter */
        pushl   %esp                            /* Save pt_regs as 4th parameter */
 
-GLOBAL(ftrace_regs_call)
+SYM_FUNC_INNER_LABEL(ftrace_regs_call, SYM_V_GLOBAL)
        call    ftrace_stub
 
        addl    $4, %esp                        /* Skip pt_regs */
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 76c774a5e792..aef60bbe854d 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -153,14 +153,14 @@ ENTRY(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs
 
-GLOBAL(ftrace_caller_op_ptr)
+SYM_FUNC_INNER_LABEL(ftrace_caller_op_ptr, SYM_V_GLOBAL)
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx
 
        /* regs go into 4th parameter (but make it NULL) */
        movq $0, %rcx
 
-GLOBAL(ftrace_call)
+SYM_FUNC_INNER_LABEL(ftrace_call, SYM_V_GLOBAL)
        call ftrace_stub
 
        restore_mcount_regs
@@ -173,10 +173,10 @@ GLOBAL(ftrace_call)
         * think twice before adding any new code or changing the
         * layout here.
         */
-GLOBAL(ftrace_epilogue)
+SYM_FUNC_INNER_LABEL(ftrace_epilogue, SYM_V_GLOBAL)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call)
+SYM_FUNC_INNER_LABEL(ftrace_graph_call, SYM_V_GLOBAL)
        jmp ftrace_stub
 #endif
 
@@ -193,7 +193,7 @@ ENTRY(ftrace_regs_caller)
        save_mcount_regs 8
        /* save_mcount_regs fills in first two parameters */
 
-GLOBAL(ftrace_regs_caller_op_ptr)
+SYM_FUNC_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_V_GLOBAL)
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx
 
@@ -220,7 +220,7 @@ GLOBAL(ftrace_regs_caller_op_ptr)
        /* regs go into 4th parameter */
        leaq (%rsp), %rcx
 
-GLOBAL(ftrace_regs_call)
+SYM_FUNC_INNER_LABEL(ftrace_regs_call, SYM_V_GLOBAL)
        call ftrace_stub
 
        /* Copy flags back to SS, to restore them */
@@ -250,7 +250,7 @@ GLOBAL(ftrace_regs_call)
         * The trampoline will add the code to jump
         * to the return.
         */
-GLOBAL(ftrace_regs_caller_end)
+SYM_FUNC_INNER_LABEL(ftrace_regs_caller_end, SYM_V_GLOBAL)
 
        jmp ftrace_epilogue
 
@@ -272,7 +272,7 @@ fgraph_trace:
        jnz ftrace_graph_caller
 #endif
 
-GLOBAL(ftrace_stub)
+SYM_FUNC_INNER_LABEL(ftrace_stub, SYM_V_GLOBAL)
        retq
 
 trace:
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index c8855d50f9c1..cebac961aa42 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -32,7 +32,7 @@ ENTRY(machine_real_restart_asm)
        movl    %eax, %cr0
        ljmpl   $__KERNEL32_CS, $pa_machine_real_restart_paging_off
 
-GLOBAL(machine_real_restart_paging_off)
+SYM_FUNC_INNER_LABEL(machine_real_restart_paging_off, SYM_V_GLOBAL)
        xorl    %eax, %eax
        xorl    %edx, %edx
        movl    $MSR_EFER, %ecx
-- 
2.12.2
