On 2026-02-03 09:19, Jens Remus wrote:
> From: Josh Poimboeuf <[email protected]>
> 
> Add CFI_STARTPROC and CFI_ENDPROC annotations to the SYM_FUNC_* macros
> so the VDSO asm functions don't need to add them manually.  Note this
> only affects VDSO, the CFI_* macros are empty for the kernel proper.
> 
> [ Jens Remus: Reword commit subject and message as suggested by Josh. ]
> 
> Signed-off-by: Josh Poimboeuf <[email protected]>
> Signed-off-by: Steven Rostedt (Google) <[email protected]>
> Signed-off-by: Jens Remus <[email protected]>
> ---
>  arch/x86/entry/vdso/common/vdso-layout.lds.S  |  2 +-
>  .../x86/entry/vdso/vdso64/vgetrandom-chacha.S |  2 --
>  arch/x86/entry/vdso/vdso64/vsgx.S             |  4 ---
>  arch/x86/include/asm/linkage.h                | 33 +++++++++++++++----
>  arch/x86/include/asm/vdso.h                   |  1 -
>  5 files changed, 28 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/entry/vdso/common/vdso-layout.lds.S b/arch/x86/entry/vdso/common/vdso-layout.lds.S
> index a1e30be3e83d..856b8b9d278c 100644
> --- a/arch/x86/entry/vdso/common/vdso-layout.lds.S
> +++ b/arch/x86/entry/vdso/common/vdso-layout.lds.S
> @@ -1,5 +1,5 @@
>  /* SPDX-License-Identifier: GPL-2.0 */
> -#include <asm/vdso.h>
> +#include <asm/page_types.h>
>  #include <asm/vdso/vsyscall.h>
>  #include <vdso/datapage.h>
>  
> diff --git a/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S b/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S
> index cc82da9216fb..a33212594731 100644
> --- a/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S
> +++ b/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S
> @@ -22,7 +22,6 @@ CONSTANTS:  .octa 0x6b20657479622d323320646e61707865
>   *   rcx: number of 64-byte blocks to write to output
>   */
>  SYM_FUNC_START(__arch_chacha20_blocks_nostack)
> -     CFI_STARTPROC
>  .set output,         %rdi
>  .set key,            %rsi
>  .set counter,        %rdx
> @@ -175,5 +174,4 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
>       pxor            temp,temp
>  
>       ret
> -     CFI_ENDPROC
>  SYM_FUNC_END(__arch_chacha20_blocks_nostack)
> diff --git a/arch/x86/entry/vdso/vdso64/vsgx.S b/arch/x86/entry/vdso/vdso64/vsgx.S
> index 37a3d4c02366..c0342238c976 100644
> --- a/arch/x86/entry/vdso/vdso64/vsgx.S
> +++ b/arch/x86/entry/vdso/vdso64/vsgx.S
> @@ -24,8 +24,6 @@
>  .section .text, "ax"
>  
>  SYM_FUNC_START(__vdso_sgx_enter_enclave)
> -     /* Prolog */
> -     .cfi_startproc
>       push    %rbp
>       .cfi_adjust_cfa_offset  8
>       .cfi_rel_offset         %rbp, 0
> @@ -143,8 +141,6 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
>       jle     .Lout
>       jmp     .Lenter_enclave
>  
> -     .cfi_endproc
> -
>  _ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)
>  
>  SYM_FUNC_END(__vdso_sgx_enter_enclave)
> diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
> index 9d38ae744a2e..9d7f90c57451 100644
> --- a/arch/x86/include/asm/linkage.h
> +++ b/arch/x86/include/asm/linkage.h
> @@ -40,6 +40,10 @@
>  
>  #ifdef __ASSEMBLER__
>  
> +#ifndef LINKER_SCRIPT
> +#include <asm/dwarf2.h>
> +#endif
> +
>  #if defined(CONFIG_MITIGATION_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
>  #define RET  jmp __x86_return_thunk
>  #else /* CONFIG_MITIGATION_RETPOLINE */
> @@ -112,34 +116,51 @@
>  # define SYM_FUNC_ALIAS_MEMFUNC      SYM_FUNC_ALIAS
>  #endif
>  
> +#define __SYM_FUNC_START                             \
> +     CFI_STARTPROC ASM_NL
> +
> +#define __SYM_FUNC_END                                       \
> +     CFI_ENDPROC ASM_NL
> +
>  /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
>  #define SYM_TYPED_FUNC_START(name)                           \
>       SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)        \
> +     __SYM_FUNC_START                                        \
>       ENDBR
>  
>  /* SYM_FUNC_START -- use for global functions */
>  #define SYM_FUNC_START(name)                         \
> -     SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)
> +     SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)      \
> +     __SYM_FUNC_START
>  
>  /* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
>  #define SYM_FUNC_START_NOALIGN(name)                 \
> -     SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
> +     SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)       \
> +     __SYM_FUNC_START
>  
>  /* SYM_FUNC_START_LOCAL -- use for local functions */
>  #define SYM_FUNC_START_LOCAL(name)                   \
> -     SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)
> +     SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)       \
> +     __SYM_FUNC_START
>  
>  /* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
>  #define SYM_FUNC_START_LOCAL_NOALIGN(name)           \
> -     SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
> +     SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)        \
> +     __SYM_FUNC_START
>  
>  /* SYM_FUNC_START_WEAK -- use for weak functions */
>  #define SYM_FUNC_START_WEAK(name)                    \
> -     SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)
> +     SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)        \
> +     __SYM_FUNC_START
>  
>  /* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
>  #define SYM_FUNC_START_WEAK_NOALIGN(name)            \
> -     SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
> +     SYM_START(name, SYM_L_WEAK, SYM_A_NONE)         \
> +     __SYM_FUNC_START
> +
> +#define SYM_FUNC_END(name)                           \
> +     __SYM_FUNC_END                                  \
> +     SYM_END(name, SYM_T_FUNC)
>  
>  /*
>   * Expose 'sym' to the startup code in arch/x86/boot/startup/, by emitting an
> diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
> index e8afbe9faa5b..498ac423741c 100644
> --- a/arch/x86/include/asm/vdso.h
> +++ b/arch/x86/include/asm/vdso.h
> @@ -2,7 +2,6 @@
>  #ifndef _ASM_X86_VDSO_H
>  #define _ASM_X86_VDSO_H
>  
> -#include <asm/page_types.h>
>  #include <linux/linkage.h>
>  #include <linux/init.h>
>  

Makes sense to me.

Acked-by: H. Peter Anvin (Intel) <[email protected]>


Reply via email to