Re: [PATCH v9 14/18] arm64: kexec: offset for relocation function

2020-05-07 Thread James Morse
Hi Pavel,

On 26/03/2020 03:24, Pavel Tatashin wrote:
> Soon, the relocation function will share the same page with the EL2 vectors.

The EL2 vectors would only be executed with the MMU off, so they don't need to be
mapped anywhere in particular. (This is something hibernate probably does sloppily.)


> Add an offset within this page to arm64_relocate_new_kernel, and also
> the total size of the relocation code, which will include both the function
> and the EL2 vectors.

See arch/arm64/kernel/vmlinux.lds.S and sections.h for an example of how the
idmap, hibernate and the non-KVM hyp code do this.

If we're going to change this, I'd prefer it be the same as the other users...
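For reference, the hibernate markers look roughly like this (a sketch from
memory, not copied verbatim, so the exact names/macros may be slightly off):

  /* arch/arm64/include/asm/sections.h */
  extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];

  /* arch/arm64/kernel/vmlinux.lds.S, via the HIBERNATE_TEXT macro */
  . = ALIGN(SZ_4K);
  __hibernate_exit_text_start = .;
  *(.hibernate_exit.text)
  __hibernate_exit_text_end = .;

The code itself goes in with .pushsection ".hibernate_exit.text", "ax" in
hibernate-asm.S, and C code can then do __hibernate_exit_text_end -
__hibernate_exit_text_start without any sizes being emitted from assembly.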


> diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
> index 990185744148..d944c2e289b2 100644
> --- a/arch/arm64/include/asm/kexec.h
> +++ b/arch/arm64/include/asm/kexec.h
> @@ -90,6 +90,13 @@ static inline void crash_prepare_suspend(void) {}
>  static inline void crash_post_resume(void) {}
>  #endif
>  
> +#if defined(CONFIG_KEXEC_CORE)
> +/* The beginning and size of relocation code to stage 2 kernel */
> +extern const unsigned long kexec_relocate_code_size;
> +extern const unsigned char kexec_relocate_code_start[];
> +extern const unsigned long kexec_kern_reloc_offset;
> +#endif

This makes these things visible globally ... but drops the arm64_ prefix!


> diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
> index 22ccdcb106d3..aa9f2b2cd77c 100644
> --- a/arch/arm64/kernel/relocate_kernel.S
> +++ b/arch/arm64/kernel/relocate_kernel.S
> @@ -14,6 +14,9 @@
>  #include 
>  #include 
>  
> +.globl kexec_relocate_code_start
> +kexec_relocate_code_start:

Which of the fancy new asm-annotations should this be?
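(They live in include/linux/linkage.h. As a rough sketch of the sort of thing
I mean, not taken from this patch:

  SYM_CODE_START(arm64_relocate_new_kernel)
	/* ... relocation loop ... */
  SYM_CODE_END(arm64_relocate_new_kernel)

SYM_CODE_* is for code that isn't called with the C calling convention;
SYM_DATA() would cover the .quad values further down.)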



> @@ -86,13 +89,16 @@ ENTRY(arm64_relocate_new_kernel)
>  .ltorg
>  END(arm64_relocate_new_kernel)
>  
> -.Lcopy_end:
> +.Lkexec_relocate_code_end:
>  .org KEXEC_CONTROL_PAGE_SIZE
>  .align 3 /* To keep the 64-bit values below naturally aligned. */

>  /*
> - * arm64_relocate_new_kernel_size - Number of bytes to copy to the
> + * kexec_relocate_code_size - Number of bytes to copy to the
>   * control_code_page.
>   */
> -.globl arm64_relocate_new_kernel_size
> -arm64_relocate_new_kernel_size:
> - .quad   .Lcopy_end - arm64_relocate_new_kernel
> +.globl kexec_relocate_code_size
> +kexec_relocate_code_size:
> + .quad   .Lkexec_relocate_code_end - kexec_relocate_code_start
> +.globl kexec_kern_reloc_offset
> +kexec_kern_reloc_offset:
> + .quad   arm64_relocate_new_kernel - kexec_relocate_code_start
> 

Can't we calculate this from the start/end markers? These variables held in
assembly-generated code are pretty manky.
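e.g. something like this in machine_kexec_post_load(), assuming both markers
are exported (kexec_relocate_code_end is my invention here, the patch only has
the local .L label):

  /* start/end markers around the whole relocation blob */
  extern const char kexec_relocate_code_start[], kexec_relocate_code_end[];
  extern const char arm64_relocate_new_kernel[];

  memcpy(reloc_code, kexec_relocate_code_start,
	 kexec_relocate_code_end - kexec_relocate_code_start);
  kimage->arch.kern_reloc = __pa(reloc_code) +
	(arm64_relocate_new_kernel - kexec_relocate_code_start);

The size and offset then fall straight out of the symbols, and nothing needs
to be stored as .quad data in the control page.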


Thanks,

James



[PATCH v9 14/18] arm64: kexec: offset for relocation function

2020-03-25 Thread Pavel Tatashin
Soon, the relocation function will share the same page with the EL2 vectors.
Add an offset within this page to arm64_relocate_new_kernel, and also
the total size of the relocation code, which will include both the function
and the EL2 vectors.

Signed-off-by: Pavel Tatashin 
---
 arch/arm64/include/asm/kexec.h  |  7 +++
 arch/arm64/kernel/machine_kexec.c   | 13 -
 arch/arm64/kernel/relocate_kernel.S | 16 +++-
 3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 990185744148..d944c2e289b2 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,6 +90,13 @@ static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif
 
+#if defined(CONFIG_KEXEC_CORE)
+/* The beginning and size of relocation code to stage 2 kernel */
+extern const unsigned long kexec_relocate_code_size;
+extern const unsigned char kexec_relocate_code_start[];
+extern const unsigned long kexec_kern_reloc_offset;
+#endif
+
 /*
  * kern_reloc_arg is passed to kernel relocation function as an argument.
 * head		kimage->head, allows to traverse through relocation segments.
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index b1122eea627e..ab571fca9bd1 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -23,10 +23,6 @@
 
 #include "cpu-reset.h"
 
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
 /**
  * kexec_image_info - For debugging output.
  */
@@ -82,9 +78,8 @@ int machine_kexec_post_load(struct kimage *kimage)
if (!kern_reloc_arg)
return -ENOMEM;
 
-   memcpy(reloc_code, arm64_relocate_new_kernel,
-  arm64_relocate_new_kernel_size);
-   kimage->arch.kern_reloc = __pa(reloc_code);
+   memcpy(reloc_code, kexec_relocate_code_start, kexec_relocate_code_size);
+   kimage->arch.kern_reloc = __pa(reloc_code) + kexec_kern_reloc_offset;
kimage->arch.kern_reloc_arg = __pa(kern_reloc_arg);
kern_reloc_arg->head = kimage->head;
kern_reloc_arg->entry_addr = kimage->start;
@@ -189,7 +184,7 @@ void machine_kexec(struct kimage *kimage)
"Some CPUs may be stale, kdump will be unreliable.\n");
 
/* Flush the reboot_code_buffer in preparation for its execution. */
-   __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+   __flush_dcache_area(reboot_code_buffer, kexec_relocate_code_size);
 
/*
 * Although we've killed off the secondary CPUs, we don't update
@@ -198,7 +193,7 @@ void machine_kexec(struct kimage *kimage)
 * the offline CPUs. Therefore, we must use the __* variant here.
 */
__flush_icache_range((uintptr_t)reboot_code_buffer,
-arm64_relocate_new_kernel_size);
+kexec_relocate_code_size);
 
/* Flush the kimage list and its buffers. */
kexec_list_flush(kimage);
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 22ccdcb106d3..aa9f2b2cd77c 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -14,6 +14,9 @@
 #include 
 #include 
 
+.globl kexec_relocate_code_start
+kexec_relocate_code_start:
+
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
  *
@@ -86,13 +89,16 @@ ENTRY(arm64_relocate_new_kernel)
 .ltorg
 END(arm64_relocate_new_kernel)
 
-.Lcopy_end:
+.Lkexec_relocate_code_end:
 .org   KEXEC_CONTROL_PAGE_SIZE
 .align 3   /* To keep the 64-bit values below naturally aligned. */
 /*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
+ * kexec_relocate_code_size - Number of bytes to copy to the
  * control_code_page.
  */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
-   .quad   .Lcopy_end - arm64_relocate_new_kernel
+.globl kexec_relocate_code_size
+kexec_relocate_code_size:
+   .quad   .Lkexec_relocate_code_end - kexec_relocate_code_start
+.globl kexec_kern_reloc_offset
+kexec_kern_reloc_offset:
+   .quad   arm64_relocate_new_kernel - kexec_relocate_code_start
-- 
2.17.1

