Re: [PATCH 1/9] x86/entry/head/32: use local labels
On Tue, Sep 20, 2016 at 05:57:16PM -0700, Andy Lutomirski wrote: > On Sep 20, 2016 10:03 AM, "Josh Poimboeuf" wrote: > > > > Add the local label prefix to all non-function named labels in head_32.S > > and entry_32.S. In addition to decluttering the symbol table, it also > > will help stack traces to be more sensible. For example, the last > > reported function in the idle task stack trace will be startup_32_smp() > > instead of is486(). > > > > I think that restore_all, at least, should stay. It's a common tail > for lots of functions. I haven't checked this patch for other cases > where the new label is worse than the old for debugging. Yes, for restore_all, I think you're right. I'll change it back. -- Josh
Re: [PATCH 1/9] x86/entry/head/32: use local labels
On Tue, Sep 20, 2016 at 05:57:16PM -0700, Andy Lutomirski wrote: > On Sep 20, 2016 10:03 AM, "Josh Poimboeuf" wrote: > > > > Add the local label prefix to all non-function named labels in head_32.S > > and entry_32.S. In addition to decluttering the symbol table, it also > > will help stack traces to be more sensible. For example, the last > > reported function in the idle task stack trace will be startup_32_smp() > > instead of is486(). > > > > I think that restore_all, at least, should stay. It's a common tail > for lots of functions. I haven't checked this patch for other cases > where the new label is worse than the old for debugging. Yes, for restore_all, I think you're right. I'll change it back. -- Josh
Re: [PATCH 1/9] x86/entry/head/32: use local labels
On Sep 20, 2016 10:03 AM, "Josh Poimboeuf" wrote: > > Add the local label prefix to all non-function named labels in head_32.S > and entry_32.S. In addition to decluttering the symbol table, it also > will help stack traces to be more sensible. For example, the last > reported function in the idle task stack trace will be startup_32_smp() > instead of is486(). > I think that restore_all, at least, should stay. It's a common tail for lots of functions. I haven't checked this patch for other cases where the new label is worse than the old for debugging. --Andy
Re: [PATCH 1/9] x86/entry/head/32: use local labels
On Sep 20, 2016 10:03 AM, "Josh Poimboeuf" wrote: > > Add the local label prefix to all non-function named labels in head_32.S > and entry_32.S. In addition to decluttering the symbol table, it also > will help stack traces to be more sensible. For example, the last > reported function in the idle task stack trace will be startup_32_smp() > instead of is486(). > I think that restore_all, at least, should stay. It's a common tail for lots of functions. I haven't checked this patch for other cases where the new label is worse than the old for debugging. --Andy
[PATCH 1/9] x86/entry/head/32: use local labels
Add the local label prefix to all non-function named labels in head_32.S and entry_32.S. In addition to decluttering the symbol table, it also will help stack traces to be more sensible. For example, the last reported function in the idle task stack trace will be startup_32_smp() instead of is486(). Signed-off-by: Josh Poimboeuf--- arch/x86/entry/entry_32.S | 55 --- arch/x86/kernel/head_32.S | 32 +-- 2 files changed, 44 insertions(+), 43 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index b75a8bc..378e912 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -64,7 +64,7 @@ # define preempt_stop(clobbers)DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF #else # define preempt_stop(clobbers) -# define resume_kernel restore_all +# define resume_kernel .Lrestore_all #endif .macro TRACE_IRQS_IRET @@ -255,7 +255,7 @@ ENTRY(ret_from_fork) /* When we fork, we trace the syscall return in the child, too. */ movl%esp, %eax callsyscall_return_slowpath - jmp restore_all + jmp .Lrestore_all /* kernel thread */ 1: movl%edi, %eax @@ -300,19 +300,19 @@ ENTRY(resume_userspace) TRACE_IRQS_OFF movl%esp, %eax callprepare_exit_to_usermode - jmp restore_all + jmp .Lrestore_all END(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) -need_resched: +.Lneed_resched: cmpl$0, PER_CPU_VAR(__preempt_count) - jnz restore_all + jnz .Lrestore_all testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? 
- jz restore_all + jz .Lrestore_all callpreempt_schedule_irq - jmp need_resched + jmp .Lneed_resched END(resume_kernel) #endif @@ -333,7 +333,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region) */ ENTRY(xen_sysenter_target) addl$5*4, %esp /* remove xen-provided frame */ - jmp sysenter_past_esp + jmp .Lsysenter_past_esp #endif /* @@ -370,7 +370,7 @@ ENTRY(xen_sysenter_target) */ ENTRY(entry_SYSENTER_32) movlTSS_sysenter_sp0(%esp), %esp -sysenter_past_esp: +.Lsysenter_past_esp: pushl $__USER_DS /* pt_regs->ss */ pushl %ebp/* pt_regs->sp (stashed in bp) */ pushfl /* pt_regs->flags (except IF = 0) */ @@ -501,11 +501,11 @@ ENTRY(entry_INT80_32) calldo_int80_syscall_32 .Lsyscall_32_done: -restore_all: +.Lrestore_all: TRACE_IRQS_IRET -restore_all_notrace: +.Lrestore_all_notrace: #ifdef CONFIG_X86_ESPFIX32 - ALTERNATIVE "jmp restore_nocheck", "", X86_BUG_ESPFIX + ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX movlPT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS /* @@ -517,22 +517,23 @@ restore_all_notrace: movbPT_CS(%esp), %al andl$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax cmpl$((SEGMENT_LDT << 8) | USER_RPL), %eax - je ldt_ss # returning to user-space with LDT SS + je .Lldt_ss # returning to user-space with LDT SS #endif -restore_nocheck: +.Lrestore_nocheck: RESTORE_REGS 4 # skip orig_eax/error_code -irq_return: +.Lirq_return: INTERRUPT_RETURN + .section .fixup, "ax" ENTRY(iret_exc ) pushl $0 # no error code pushl $do_iret_error jmp error_code .previous - _ASM_EXTABLE(irq_return, iret_exc) + _ASM_EXTABLE(.Lirq_return, iret_exc) #ifdef CONFIG_X86_ESPFIX32 -ldt_ss: +.Lldt_ss: /* * Setup and switch to ESPFIX stack * @@ -561,7 +562,7 @@ ldt_ss: */ DISABLE_INTERRUPTS(CLBR_EAX) lss (%esp), %esp/* switch to espfix segment */ - jmp restore_nocheck + jmp .Lrestore_nocheck #endif ENDPROC(entry_INT80_32) @@ -881,7 +882,7 @@ ftrace_call: popl%edx popl%ecx popl%eax -ftrace_ret: +.Lftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call 
ftrace_graph_call: @@ -951,7 +952,7 @@ GLOBAL(ftrace_regs_call) popl%gs addl$8, %esp/* Skip orig_ax and ip */ popf/* Pop flags at end (no addl to corrupt flags) */ - jmp ftrace_ret + jmp .Lftrace_ret popf jmp ftrace_stub @@ -962,7 +963,7 @@ ENTRY(mcount) jb ftrace_stub /* Paging not enabled yet? */ cmpl$ftrace_stub, ftrace_trace_function - jnz trace + jnz
[PATCH 1/9] x86/entry/head/32: use local labels
Add the local label prefix to all non-function named labels in head_32.S and entry_32.S. In addition to decluttering the symbol table, it also will help stack traces to be more sensible. For example, the last reported function in the idle task stack trace will be startup_32_smp() instead of is486(). Signed-off-by: Josh Poimboeuf --- arch/x86/entry/entry_32.S | 55 --- arch/x86/kernel/head_32.S | 32 +-- 2 files changed, 44 insertions(+), 43 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index b75a8bc..378e912 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -64,7 +64,7 @@ # define preempt_stop(clobbers)DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF #else # define preempt_stop(clobbers) -# define resume_kernel restore_all +# define resume_kernel .Lrestore_all #endif .macro TRACE_IRQS_IRET @@ -255,7 +255,7 @@ ENTRY(ret_from_fork) /* When we fork, we trace the syscall return in the child, too. */ movl%esp, %eax callsyscall_return_slowpath - jmp restore_all + jmp .Lrestore_all /* kernel thread */ 1: movl%edi, %eax @@ -300,19 +300,19 @@ ENTRY(resume_userspace) TRACE_IRQS_OFF movl%esp, %eax callprepare_exit_to_usermode - jmp restore_all + jmp .Lrestore_all END(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) -need_resched: +.Lneed_resched: cmpl$0, PER_CPU_VAR(__preempt_count) - jnz restore_all + jnz .Lrestore_all testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? 
- jz restore_all + jz .Lrestore_all callpreempt_schedule_irq - jmp need_resched + jmp .Lneed_resched END(resume_kernel) #endif @@ -333,7 +333,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region) */ ENTRY(xen_sysenter_target) addl$5*4, %esp /* remove xen-provided frame */ - jmp sysenter_past_esp + jmp .Lsysenter_past_esp #endif /* @@ -370,7 +370,7 @@ ENTRY(xen_sysenter_target) */ ENTRY(entry_SYSENTER_32) movlTSS_sysenter_sp0(%esp), %esp -sysenter_past_esp: +.Lsysenter_past_esp: pushl $__USER_DS /* pt_regs->ss */ pushl %ebp/* pt_regs->sp (stashed in bp) */ pushfl /* pt_regs->flags (except IF = 0) */ @@ -501,11 +501,11 @@ ENTRY(entry_INT80_32) calldo_int80_syscall_32 .Lsyscall_32_done: -restore_all: +.Lrestore_all: TRACE_IRQS_IRET -restore_all_notrace: +.Lrestore_all_notrace: #ifdef CONFIG_X86_ESPFIX32 - ALTERNATIVE "jmp restore_nocheck", "", X86_BUG_ESPFIX + ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX movlPT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS /* @@ -517,22 +517,23 @@ restore_all_notrace: movbPT_CS(%esp), %al andl$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax cmpl$((SEGMENT_LDT << 8) | USER_RPL), %eax - je ldt_ss # returning to user-space with LDT SS + je .Lldt_ss # returning to user-space with LDT SS #endif -restore_nocheck: +.Lrestore_nocheck: RESTORE_REGS 4 # skip orig_eax/error_code -irq_return: +.Lirq_return: INTERRUPT_RETURN + .section .fixup, "ax" ENTRY(iret_exc ) pushl $0 # no error code pushl $do_iret_error jmp error_code .previous - _ASM_EXTABLE(irq_return, iret_exc) + _ASM_EXTABLE(.Lirq_return, iret_exc) #ifdef CONFIG_X86_ESPFIX32 -ldt_ss: +.Lldt_ss: /* * Setup and switch to ESPFIX stack * @@ -561,7 +562,7 @@ ldt_ss: */ DISABLE_INTERRUPTS(CLBR_EAX) lss (%esp), %esp/* switch to espfix segment */ - jmp restore_nocheck + jmp .Lrestore_nocheck #endif ENDPROC(entry_INT80_32) @@ -881,7 +882,7 @@ ftrace_call: popl%edx popl%ecx popl%eax -ftrace_ret: +.Lftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call 
ftrace_graph_call: @@ -951,7 +952,7 @@ GLOBAL(ftrace_regs_call) popl%gs addl$8, %esp/* Skip orig_ax and ip */ popf/* Pop flags at end (no addl to corrupt flags) */ - jmp ftrace_ret + jmp .Lftrace_ret popf jmp ftrace_stub @@ -962,7 +963,7 @@ ENTRY(mcount) jb ftrace_stub /* Paging not enabled yet? */ cmpl$ftrace_stub, ftrace_trace_function - jnz trace + jnz .Ltrace #ifdef