On Wed, Jun 17, 2020 at 04:49:49PM +0200, Peter Zijlstra wrote:

> I had the below, except of course that yields another objtool
> complaint, and I was still looking at that.

This cures it.

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 5fbb90a80d239..fe0d6f1b28d7c 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2746,7 +2746,7 @@ int check(const char *_objname, bool orc)
 
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
-       file.c_file = find_section_by_name(file.elf, ".comment");
+       file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
        file.ignore_unreachables = no_unreachable;
        file.hints = false;


> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
> index af75109485c26..a7d1570905727 100644
> --- a/arch/x86/kernel/traps.c
> +++ b/arch/x86/kernel/traps.c
> @@ -690,13 +690,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
>               (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
>  
>       /* Copy the IRET target to the temporary storage. */
> -     memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
> +     __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
>  
>       /* Copy the remainder of the stack from the current stack. */
> -     memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
> +     __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
>  
>       /* Update the entry stack */
> -     memcpy(new_stack, &tmp, sizeof(tmp));
> +     __memcpy(new_stack, &tmp, sizeof(tmp));
>  
>       BUG_ON(!user_mode(&new_stack->regs));
>       return new_stack;
> diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
> index 56b243b14c3a2..bbcc05bcefadb 100644
> --- a/arch/x86/lib/memcpy_64.S
> +++ b/arch/x86/lib/memcpy_64.S
> @@ -8,6 +8,8 @@
>  #include <asm/alternative-asm.h>
>  #include <asm/export.h>
>  
> +.pushsection .noinstr.text, "ax"
> +
>  /*
>   * We build a jump to memcpy_orig by default which gets NOPped out on
>   * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
> @@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
>       retq
>  SYM_FUNC_END(memcpy_orig)
>  
> +.popsection
> +
>  #ifndef CONFIG_UML
>  
>  MCSAFE_TEST_CTL

Reply via email to