These macros should be used to avoid repeating the context save and
restore sequences.

Signed-off-by: Andrew Scull <[email protected]>
---
 arch/arm64/include/asm/kvm_asm.h    | 39 +++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/entry.S          | 39 -----------------------------
 arch/arm64/kvm/hyp/nvhe/hyp-start.S |  2 --
 3 files changed, 39 insertions(+), 41 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ff27c18b9fd6..1b2c718fa58f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -212,6 +212,45 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
        add     \reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
+#define CPU_XREG_OFFSET(x)     (CPU_USER_PT_REGS + 8*x)
+#define CPU_LR_OFFSET          CPU_XREG_OFFSET(30)
+#define CPU_SP_EL0_OFFSET      (CPU_LR_OFFSET + 8)
+
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
+.macro save_callee_saved_regs ctxt
+       str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
+       stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+       stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+       stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+       stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+       stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+       stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_callee_saved_regs ctxt
+       // We require \ctxt is not x18-x28
+       ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
+       ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+       ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+       ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+       ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+       ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+       ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro save_sp_el0 ctxt, tmp
+       mrs     \tmp,   sp_el0
+       str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+       ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
+       msr     sp_el0, \tmp
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index da349c152791..63d484059c01 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
-#include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>
@@ -16,46 +15,8 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_ptrauth.h>
 
-#define CPU_XREG_OFFSET(x)     (CPU_USER_PT_REGS + 8*x)
-#define CPU_SP_EL0_OFFSET      (CPU_XREG_OFFSET(30) + 8)
-
        .text
 
-/*
- * We treat x18 as callee-saved as the host may use it as a platform
- * register (e.g. for shadow call stack).
- */
-.macro save_callee_saved_regs ctxt
-       str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-       stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-       stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-       stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-       stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-       stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-       stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
-.endm
-
-.macro restore_callee_saved_regs ctxt
-       // We require \ctxt is not x18-x28
-       ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
-       ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
-       ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
-       ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
-       ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
-       ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
-       ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
-.endm
-
-.macro save_sp_el0 ctxt, tmp
-       mrs     \tmp,   sp_el0
-       str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
-.endm
-
-.macro restore_sp_el0 ctxt, tmp
-       ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
-       msr     sp_el0, \tmp
-.endm
-
 /*
  * u64 __guest_enter(struct kvm_vcpu *vcpu,
  *                  struct kvm_cpu_context *host_ctxt);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-start.S b/arch/arm64/kvm/hyp/nvhe/hyp-start.S
index dd955e022963..d7744dcfd184 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-start.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-start.S
@@ -13,8 +13,6 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_ptrauth.h>
 
-#define CPU_LR_OFFSET (CPU_USER_PT_REGS + (8 * 30))
-
 /*
  * Initialize ptrauth in the hyp ctxt by populating it with the keys of the
  * host, which are the keys currently installed.
-- 
2.27.0.389.gc38d7665816-goog

_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to