For VHE-specific hypervisor code, kern_hyp_va() is a NOP: with VHE,
the kernel and the hypervisor share the same VA space, so there is no
kernel-to-hyp address translation to perform.

Actually, it is a whole range of NOPs: on VHE systems, the runtime
patching driven by kvm_update_va_mask turns the entire translation
sequence into NOPs anyway. It'd be much better if this code simply
didn't exist in the VHE-specific objects. Let's just do that by
compiling the sequence out when __KVM_VHE_HYPERVISOR__ is defined.
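
To make the effect concrete, here is what the C helper effectively
reduces to in an object built with __KVM_VHE_HYPERVISOR__ defined,
once the #ifndef added below kicks in (a sketch of the resulting
behaviour, not an extra hunk in the diff):

	/* VHE build: the kernel VA already is the hyp VA, so no
	 * masking/rotation sequence is emitted at all. */
	static __always_inline unsigned long __kern_hyp_va(unsigned long v)
	{
		return v;
	}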

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/include/asm/kvm_mmu.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7784081088e7..e32725e90360 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -63,6 +63,7 @@
  * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va     reg
+#ifndef __KVM_VHE_HYPERVISOR__
 alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
        and     \reg, \reg, #1          /* mask with va_mask */
        ror     \reg, \reg, #1          /* rotate to the first tag bit */
@@ -70,6 +71,7 @@ alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
        add     \reg, \reg, #0, lsl 12  /* insert the top 12 bits of the tag */
        ror     \reg, \reg, #63         /* rotate back */
 alternative_cb_end
+#endif
 .endm
 
 /*
@@ -126,6 +128,7 @@ void kvm_apply_hyp_relocations(void);
 
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
+#ifndef __KVM_VHE_HYPERVISOR__
        asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
                                    "ror %0, %0, #1\n"
                                    "add %0, %0, #0\n"
@@ -134,6 +137,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
                                    ARM64_ALWAYS_SYSTEM,
                                    kvm_update_va_mask)
                     : "+r" (v));
+#endif
        return v;
 }
 
-- 
2.34.1
