The following commit has been merged into the x86/asm branch of tip:

Commit-ID:     6ec2a968247e51535e08dbbbfc8f53c95a48cde0
Gitweb:        
https://git.kernel.org/tip/6ec2a968247e51535e08dbbbfc8f53c95a48cde0
Author:        Jiri Slaby <[email protected]>
AuthorDate:    Fri, 11 Oct 2019 13:50:43 +02:00
Committer:     Borislav Petkov <[email protected]>
CommitterDate: Fri, 18 Oct 2019 09:53:19 +02:00

x86/asm: Annotate relocate_kernel_{32,64}.S

There are functions in relocate_kernel_{32,64}.S which are not
annotated. This makes automatic annotations on them rather hard. So
annotate all the functions now.

Note that these are not C-like functions, so FUNC is not used. Instead
CODE markers are used. Also the functions are not aligned, so the
NOALIGN versions are used:

- SYM_CODE_START_NOALIGN
- SYM_CODE_START_LOCAL_NOALIGN
- SYM_CODE_END

The result is:
  0000   108 NOTYPE  GLOBAL DEFAULT    1 relocate_kernel
  006c   165 NOTYPE  LOCAL  DEFAULT    1 identity_mapped
  0146   127 NOTYPE  LOCAL  DEFAULT    1 swap_pages
  0111    53 NOTYPE  LOCAL  DEFAULT    1 virtual_mapped

Signed-off-by: Jiri Slaby <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Cc: Alexios Zavras <[email protected]>
Cc: Allison Randal <[email protected]>
Cc: Enrico Weigelt <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: [email protected]
Cc: Thomas Gleixner <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
 arch/x86/kernel/relocate_kernel_32.S | 13 ++++++++-----
 arch/x86/kernel/relocate_kernel_64.S | 13 ++++++++-----
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/relocate_kernel_32.S 
b/arch/x86/kernel/relocate_kernel_32.S
index ee26df0..94b3388 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -35,8 +35,7 @@
 #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c)
 
        .text
-       .globl relocate_kernel
-relocate_kernel:
+SYM_CODE_START_NOALIGN(relocate_kernel)
        /* Save the CPU context, used for jumping back */
 
        pushl   %ebx
@@ -93,8 +92,9 @@ relocate_kernel:
        addl    $(identity_mapped - relocate_kernel), %eax
        pushl   %eax
        ret
+SYM_CODE_END(relocate_kernel)
 
-identity_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        /* set return address to 0 if not preserving context */
        pushl   $0
        /* store the start address on the stack */
@@ -191,8 +191,9 @@ identity_mapped:
        addl    $(virtual_mapped - relocate_kernel), %eax
        pushl   %eax
        ret
+SYM_CODE_END(identity_mapped)
 
-virtual_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        movl    CR4(%edi), %eax
        movl    %eax, %cr4
        movl    CR3(%edi), %eax
@@ -208,9 +209,10 @@ virtual_mapped:
        popl    %esi
        popl    %ebx
        ret
+SYM_CODE_END(virtual_mapped)
 
        /* Do the copies */
-swap_pages:
+SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        movl    8(%esp), %edx
        movl    4(%esp), %ecx
        pushl   %ebp
@@ -270,6 +272,7 @@ swap_pages:
        popl    %ebx
        popl    %ebp
        ret
+SYM_CODE_END(swap_pages)
 
        .globl kexec_control_code_size
 .set kexec_control_code_size, . - relocate_kernel
diff --git a/arch/x86/kernel/relocate_kernel_64.S 
b/arch/x86/kernel/relocate_kernel_64.S
index c51ccff..ef3ba99 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -38,8 +38,7 @@
        .text
        .align PAGE_SIZE
        .code64
-       .globl relocate_kernel
-relocate_kernel:
+SYM_CODE_START_NOALIGN(relocate_kernel)
        /*
         * %rdi indirection_page
         * %rsi page_list
@@ -103,8 +102,9 @@ relocate_kernel:
        addq    $(identity_mapped - relocate_kernel), %r8
        pushq   %r8
        ret
+SYM_CODE_END(relocate_kernel)
 
-identity_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        /* set return address to 0 if not preserving context */
        pushq   $0
        /* store the start address on the stack */
@@ -209,8 +209,9 @@ identity_mapped:
        movq    $virtual_mapped, %rax
        pushq   %rax
        ret
+SYM_CODE_END(identity_mapped)
 
-virtual_mapped:
+SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        movq    RSP(%r8), %rsp
        movq    CR4(%r8), %rax
        movq    %rax, %cr4
@@ -228,9 +229,10 @@ virtual_mapped:
        popq    %rbp
        popq    %rbx
        ret
+SYM_CODE_END(virtual_mapped)
 
        /* Do the copies */
-swap_pages:
+SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        movq    %rdi, %rcx      /* Put the page_list in %rcx */
        xorl    %edi, %edi
        xorl    %esi, %esi
@@ -283,6 +285,7 @@ swap_pages:
        jmp     0b
 3:
        ret
+SYM_CODE_END(swap_pages)
 
        .globl kexec_control_code_size
 .set kexec_control_code_size, . - relocate_kernel

Reply via email to