Re: [RFC PATCH 6/6] kvm: arm64: Remove hyp_symbol_addr

2020-11-24 Thread Ard Biesheuvel
On Thu, 19 Nov 2020 at 17:26, David Brazdil  wrote:
>
> The helper was used to force PC-relative addressing in hyp code because
> absolute addressing via constant-pools used to generate kernel VAs. This
> was cumbersome and required programmers to remember to use the helper
> whenever they wanted to take a pointer.
>
> Now that hyp relocations are fixed up, there is no need for the helper
> any longer. Remove it.
>
> Signed-off-by: David Brazdil 

Acked-by: Ard Biesheuvel 

> ---
>  arch/arm64/include/asm/kvm_asm.h | 20 
>  arch/arm64/kvm/hyp/include/hyp/switch.h  |  4 ++--
>  arch/arm64/kvm/hyp/nvhe/hyp-smp.c|  4 ++--
>  arch/arm64/kvm/hyp/nvhe/psci-relay.c |  4 ++--
>  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c |  2 +-
>  5 files changed, 7 insertions(+), 27 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h 
> b/arch/arm64/include/asm/kvm_asm.h
> index 1a86581e581e..1961d23c0c40 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -203,26 +203,6 @@ extern void __vgic_v3_init_lrs(void);
>
>  extern u32 __kvm_get_mdcr_el2(void);
>
> -/*
> - * Obtain the PC-relative address of a kernel symbol
> - * s: symbol
> - *
> - * The goal of this macro is to return a symbol's address based on a
> - * PC-relative computation, as opposed to a loading the VA from a
> - * constant pool or something similar. This works well for HYP, as an
> - * absolute VA is guaranteed to be wrong. Only use this if trying to
> - * obtain the address of a symbol (i.e. not something you obtained by
> - * following a pointer).
> - */
> -#define hyp_symbol_addr(s) \
> -   ({  \
> -   typeof(s) *addr;\
> -   asm("adrp   %0, %1\n"   \
> -   "add%0, %0, :lo12:%1\n" \
> -   : "=r" (addr) : "S" (&s));  \
> -   addr;   \
> -   })
> -
>  #define __KVM_EXTABLE(from, to)  
>   \
> "   .pushsection__kvm_ex_table, \"a\"\n"\
> "   .align  3\n"\
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h 
> b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 84473574c2e7..54f4860cd87c 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
> struct exception_table_entry *entry, *end;
> unsigned long elr_el2 = read_sysreg(elr_el2);
>
> -   entry = hyp_symbol_addr(__start___kvm_ex_table);
> -   end = hyp_symbol_addr(__stop___kvm_ex_table);
> +   entry = &__start___kvm_ex_table;
> +   end = &__stop___kvm_ex_table;
>
> while (entry < end) {
> addr = (unsigned long)&entry->insn + entry->insn;
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c 
> b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
> index ceb427aabb91..6870d9f3d4b7 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
> @@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
> if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
> hyp_panic();
>
> -   cpu_base_array = (unsigned 
> long*)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
> +   cpu_base_array = (unsigned long*)(&kvm_arm_hyp_percpu_base[0]);
> this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
> -   elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
> +   elf_base = (unsigned long)&__per_cpu_start;
> return this_cpu_base - elf_base;
>  }
> diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c 
> b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> index 313ef42f0eab..f64380a49a72 100644
> --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> @@ -147,7 +147,7 @@ static int psci_cpu_suspend(u64 func_id, struct 
> kvm_cpu_context *host_ctxt)
>  * point if it is a deep sleep state.
>  */
> ret = psci_call(func_id, power_state,
> -   __hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
> +   __hyp_pa(__kvm_hyp_cpu_entry),
> __hyp_pa(cpu_params));
>
> release_reset_state(cpu_state);
> @@ -182,7 +182,7 @@ static int psci_cpu_on(u64 func_id, struct 
> kvm_cpu_context *host_ctxt)
> return PSCI_RET_ALREADY_ON;
>
> ret = psci_call(func_id, mpidr,
> -   __hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
> +   __hyp_pa(__kvm_hyp_cpu_entry),
> __hyp_pa(cpu_params));
>
> /*
> diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 
> 

[RFC PATCH 6/6] kvm: arm64: Remove hyp_symbol_addr

2020-11-19 Thread David Brazdil
The helper was used to force PC-relative addressing in hyp code because
absolute addressing via constant-pools used to generate kernel VAs. This
was cumbersome and required programmers to remember to use the helper
whenever they wanted to take a pointer.

Now that hyp relocations are fixed up, there is no need for the helper
any longer. Remove it.

Signed-off-by: David Brazdil 
---
 arch/arm64/include/asm/kvm_asm.h | 20 
 arch/arm64/kvm/hyp/include/hyp/switch.h  |  4 ++--
 arch/arm64/kvm/hyp/nvhe/hyp-smp.c|  4 ++--
 arch/arm64/kvm/hyp/nvhe/psci-relay.c |  4 ++--
 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c |  2 +-
 5 files changed, 7 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 1a86581e581e..1961d23c0c40 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -203,26 +203,6 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
-   ({  \
-   typeof(s) *addr;\
-   asm("adrp   %0, %1\n"   \
-   "add%0, %0, :lo12:%1\n" \
-   : "=r" (addr) : "S" (&s));  \
-   addr;   \
-   })
-
 #define __KVM_EXTABLE(from, to)
\
"   .pushsection__kvm_ex_table, \"a\"\n"\
"   .align  3\n"\
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h 
b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 84473574c2e7..54f4860cd87c 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
struct exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
 
-   entry = hyp_symbol_addr(__start___kvm_ex_table);
-   end = hyp_symbol_addr(__stop___kvm_ex_table);
+   entry = &__start___kvm_ex_table;
+   end = &__stop___kvm_ex_table;
 
while (entry < end) {
addr = (unsigned long)&entry->insn + entry->insn;
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c 
b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index ceb427aabb91..6870d9f3d4b7 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
hyp_panic();
 
-   cpu_base_array = (unsigned 
long*)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
+   cpu_base_array = (unsigned long*)(&kvm_arm_hyp_percpu_base[0]);
this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
-   elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
+   elf_base = (unsigned long)&__per_cpu_start;
return this_cpu_base - elf_base;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c 
b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 313ef42f0eab..f64380a49a72 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -147,7 +147,7 @@ static int psci_cpu_suspend(u64 func_id, struct 
kvm_cpu_context *host_ctxt)
 * point if it is a deep sleep state.
 */
ret = psci_call(func_id, power_state,
-   __hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
+   __hyp_pa(__kvm_hyp_cpu_entry),
__hyp_pa(cpu_params));
 
release_reset_state(cpu_state);
@@ -182,7 +182,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context 
*host_ctxt)
return PSCI_RET_ALREADY_ON;
 
ret = psci_call(func_id, mpidr,
-   __hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
+   __hyp_pa(__kvm_hyp_cpu_entry),
__hyp_pa(cpu_params));
 
/*
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 
b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 8f0585640241..87a54375bd6e 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
}
 
rd =