Fix the following asmvalidate warnings:

   asmvalidate: arch/x86/lib/clear_page_64.o: clear_page()+0x0: unsupported jump to outside of function
   asmvalidate: arch/x86/lib/clear_page_64.o: alternative jump to outside the scope of original function clear_page
   asmvalidate: arch/x86/lib/copy_page_64.o: copy_page()+0x0: unsupported jump to outside of function
   asmvalidate: arch/x86/lib/memcpy_64.o: memcpy()+0x0: unsupported jump to outside of function
   asmvalidate: arch/x86/lib/memcpy_64.o: __memcpy()+0x0: unsupported jump to outside of function
   asmvalidate: arch/x86/lib/memcpy_64.o: alternative jump to outside the scope of original function memcpy
   asmvalidate: arch/x86/lib/memset_64.o: memset()+0x0: unsupported jump to outside of function
   asmvalidate: arch/x86/lib/memset_64.o: __memset()+0x0: unsupported jump to outside of function
   asmvalidate: arch/x86/lib/memset_64.o: alternative jump to outside the scope of original function memset

The jumps in question come from the alternative instructions at the top
of clear_page(), copy_page(), memcpy(), and memset(), which can be
patched to jump to out-of-line variants (clear_page_orig(),
memcpy_erms(), memset_orig(), etc.) that were annotated as separate
functions.  Change the annotations so that the variants become local
labels inside the primary functions, keeping all jumps within the
annotated function boundaries.
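
As an illustration of the pattern, clear_page() begins with an
ALTERNATIVE_2 that may be patched at boot to jump to one of the two
variants.  After this change, clear_page_64.S is laid out roughly as
follows (a sketch: the ALTERNATIVE_2 dispatch and the elided "..." body
are recalled from the file's surrounding context, not shown in the
hunks below):

    ENTRY(clear_page)
            ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
                          "jmp clear_page_c_e", X86_FEATURE_ERMS
            movl $4096/8,%ecx
            xorl %eax,%eax
            rep stosq
            ret

    clear_page_orig:        /* local label, inside clear_page()'s bounds */
            ...
            ret

    clear_page_c_e:         /* local label, inside clear_page()'s bounds */
            movl $4096,%ecx
            xorl %eax,%eax
            rep stosb
            ret
    ENDPROC(clear_page)

With clear_page_orig and clear_page_c_e demoted from ENTRY/ENDPROC
symbols to local labels, the alternative jump targets fall within the
single clear_page() function that asmvalidate checks.  The same pattern
applies to copy_page_regs, memcpy_erms/memcpy_orig, and
memset_erms/memset_orig; memcpy() and memset() keep a second ENDPROC
for their __memcpy() and __memset() aliases.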

Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
---
 arch/x86/lib/clear_page_64.S |  9 +++------
 arch/x86/lib/copy_page_64.S  |  5 ++---
 arch/x86/lib/memcpy_64.S     | 10 ++++------
 arch/x86/lib/memset_64.S     | 10 ++++------
 4 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index a2fe51b..c342566 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -22,10 +22,8 @@ ENTRY(clear_page)
        xorl %eax,%eax
        rep stosq
        ret
-ENDPROC(clear_page)
-
-ENTRY(clear_page_orig)
 
+clear_page_orig:
        xorl   %eax,%eax
        movl   $4096/64,%ecx
        .p2align 4
@@ -44,11 +42,10 @@ ENTRY(clear_page_orig)
        jnz     .Lloop
        nop
        ret
-ENDPROC(clear_page_orig)
 
-ENTRY(clear_page_c_e)
+clear_page_c_e:
        movl $4096,%ecx
        xorl %eax,%eax
        rep stosb
        ret
-ENDPROC(clear_page_c_e)
+ENDPROC(clear_page)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 009f982..81d5cba 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -16,9 +16,8 @@ ENTRY(copy_page)
        movl    $4096/8, %ecx
        rep     movsq
        ret
-ENDPROC(copy_page)
 
-ENTRY(copy_page_regs)
+copy_page_regs:
        subq    $2*8,   %rsp
        movq    %rbx,   (%rsp)
        movq    %r12,   1*8(%rsp)
@@ -83,4 +82,4 @@ ENTRY(copy_page_regs)
        movq    1*8(%rsp), %r12
        addq    $2*8, %rsp
        ret
-ENDPROC(copy_page_regs)
+ENDPROC(copy_page)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 16698bb..64d00ec 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -37,21 +37,18 @@ ENTRY(memcpy)
        movl %edx, %ecx
        rep movsb
        ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
 
 /*
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+memcpy_erms:
        movq %rdi, %rax
        movq %rdx, %rcx
        rep movsb
        ret
-ENDPROC(memcpy_erms)
 
-ENTRY(memcpy_orig)
+memcpy_orig:
        movq %rdi, %rax
 
        cmpq $0x20, %rdx
@@ -176,4 +173,5 @@ ENTRY(memcpy_orig)
 
 .Lend:
        retq
-ENDPROC(memcpy_orig)
+ENDPROC(memcpy)
+ENDPROC(__memcpy)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2661fad..a0d9f3f 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -41,8 +41,6 @@ ENTRY(__memset)
        rep stosb
        movq %r9,%rax
        ret
-ENDPROC(memset)
-ENDPROC(__memset)
 
 /*
  * ISO C memset - set a memory block to a byte value. This function uses
@@ -55,16 +53,15 @@ ENDPROC(__memset)
  *
  * rax   original destination
  */
-ENTRY(memset_erms)
+memset_erms:
        movq %rdi,%r9
        movb %sil,%al
        movq %rdx,%rcx
        rep stosb
        movq %r9,%rax
        ret
-ENDPROC(memset_erms)
 
-ENTRY(memset_orig)
+memset_orig:
        movq %rdi,%r10
 
        /* expand byte value  */
@@ -135,4 +132,5 @@ ENTRY(memset_orig)
        subq %r8,%rdx
        jmp .Lafter_bad_alignment
 .Lfinal:
-ENDPROC(memset_orig)
+ENDPROC(memset)
+ENDPROC(__memset)
-- 
2.1.0
