From: Fuad Tabba <ta...@google.com>

Factor out logic that resets a vcpu's core registers, including
additional PSCI handling. This code will be reused when resetting
VMs in protected mode.

Signed-off-by: Fuad Tabba <ta...@google.com>
---
 arch/arm64/include/asm/kvm_emulate.h | 41 +++++++++++++++++++++++++
 arch/arm64/kvm/reset.c               | 45 +++++-----------------------
 2 files changed, 48 insertions(+), 38 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 82515b015eb4..2a79c861b8e0 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -522,4 +522,45 @@ static inline unsigned long psci_affinity_mask(unsigned long affinity_level)
        return 0;
 }
 
+/* Reset a vcpu's core registers. */
+static inline void kvm_reset_vcpu_core(struct kvm_vcpu *vcpu)
+{
+       u32 pstate;
+
+       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+               pstate = VCPU_RESET_PSTATE_SVC;
+       } else {
+               pstate = VCPU_RESET_PSTATE_EL1;
+       }
+
+       /* Reset core registers */
+       memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+       memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
+       vcpu->arch.ctxt.spsr_abt = 0;
+       vcpu->arch.ctxt.spsr_und = 0;
+       vcpu->arch.ctxt.spsr_irq = 0;
+       vcpu->arch.ctxt.spsr_fiq = 0;
+       vcpu_gp_regs(vcpu)->pstate = pstate;
+}
+
+/* PSCI reset handling for a vcpu. */
+static inline void kvm_reset_vcpu_psci(struct kvm_vcpu *vcpu,
+                                      struct vcpu_reset_state *reset_state)
+{
+       unsigned long target_pc = reset_state->pc;
+
+       /* Gracefully handle Thumb2 entry point */
+       if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+               target_pc &= ~1UL;
+               vcpu_set_thumb(vcpu);
+       }
+
+       /* Propagate caller endianness */
+       if (reset_state->be)
+               kvm_vcpu_set_be(vcpu);
+
+       *vcpu_pc(vcpu) = target_pc;
+       vcpu_set_reg(vcpu, 0, reset_state->r0);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 6bc979aece3c..4d223fae996d 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -109,7 +109,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
                kfree(buf);
                return ret;
        }
-       
+
        vcpu->arch.sve_state = buf;
        vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
        return 0;
@@ -202,7 +202,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        struct vcpu_reset_state reset_state;
        int ret;
        bool loaded;
-       u32 pstate;
 
        mutex_lock(&vcpu->kvm->lock);
        reset_state = vcpu->arch.reset_state;
@@ -240,29 +239,13 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                goto out;
        }
 
-       switch (vcpu->arch.target) {
-       default:
-               if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-                       pstate = VCPU_RESET_PSTATE_SVC;
-               } else {
-                       pstate = VCPU_RESET_PSTATE_EL1;
-               }
-
-               if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-               break;
+       if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
+               ret = -EINVAL;
+               goto out;
        }
 
        /* Reset core registers */
-       memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
-       memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
-       vcpu->arch.ctxt.spsr_abt = 0;
-       vcpu->arch.ctxt.spsr_und = 0;
-       vcpu->arch.ctxt.spsr_irq = 0;
-       vcpu->arch.ctxt.spsr_fiq = 0;
-       vcpu_gp_regs(vcpu)->pstate = pstate;
+       kvm_reset_vcpu_core(vcpu);
 
        /* Reset system registers */
        kvm_reset_sys_regs(vcpu);
@@ -271,22 +254,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
         * Additional reset state handling that PSCI may have imposed on us.
         * Must be done after all the sys_reg reset.
         */
-       if (reset_state.reset) {
-               unsigned long target_pc = reset_state.pc;
-
-               /* Gracefully handle Thumb2 entry point */
-               if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
-                       target_pc &= ~1UL;
-                       vcpu_set_thumb(vcpu);
-               }
-
-               /* Propagate caller endianness */
-               if (reset_state.be)
-                       kvm_vcpu_set_be(vcpu);
-
-               *vcpu_pc(vcpu) = target_pc;
-               vcpu_set_reg(vcpu, 0, reset_state.r0);
-       }
+       if (reset_state.reset)
+               kvm_reset_vcpu_psci(vcpu, &reset_state);
 
        /* Reset timer */
        ret = kvm_timer_vcpu_reset(vcpu);
-- 
2.36.1.124.g0e6072fb45-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to