Pending virtual interrupts are saved in the saving phase and restoration
enables or disables traps depending on whether or not the host is being
restored.

Signed-off-by: Andrew Scull <[email protected]>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +-
 arch/arm64/kvm/hyp/nvhe/switch.c        | 28 +++++++++++--------------
 arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
 3 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 84fd6b0601b2..65a29d029c53 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -113,7 +113,7 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu)
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 }
 
-static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
+static inline void __save_traps(struct kvm_vcpu *vcpu)
 {
        /*
         * If we pended a virtual abort, preserve it until it gets
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 260c5cbb6717..0f7670dabf50 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -45,10 +45,8 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
        write_sysreg(val, cptr_el2);
 }
 
-static void __deactivate_traps(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *vcpu)
+static void __deactivate_traps(struct kvm_vcpu *host_vcpu)
 {
-       ___deactivate_traps(vcpu);
-
        __deactivate_traps_common();
 
        write_sysreg(host_vcpu->arch.mdcr_el2, mdcr_el2);
@@ -56,6 +54,14 @@ static void __deactivate_traps(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *vcpu
        write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
+static void __restore_traps(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.ctxt.is_host)
+               __deactivate_traps(vcpu);
+       else
+               __activate_traps(vcpu);
+}
+
 static void __restore_stage2(struct kvm_vcpu *vcpu)
 {
        if (vcpu->arch.hcr_el2 & HCR_VM)
@@ -134,6 +140,7 @@ static void __vcpu_save_state(struct kvm_vcpu *vcpu, bool save_debug)
 
        __fpsimd_save_fpexc32(vcpu);
 
+       __save_traps(vcpu);
        __debug_save_spe(vcpu);
 
        if (save_debug)
@@ -143,14 +150,6 @@ static void __vcpu_save_state(struct kvm_vcpu *vcpu, bool save_debug)
 
 static void __vcpu_restore_state(struct kvm_vcpu *vcpu, bool restore_debug)
 {
-       struct kvm_vcpu *running_vcpu;
-
-       /*
-        * Restoration is not yet pure so it still makes use of the previously
-        * running vcpu.
-        */
-       running_vcpu = __hyp_this_cpu_read(kvm_hyp_running_vcpu);
-
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;
 
@@ -179,10 +178,7 @@ static void __vcpu_restore_state(struct kvm_vcpu *vcpu, bool restore_debug)
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_state_nvhe(&vcpu->arch.ctxt);
 
-       if (vcpu->arch.ctxt.is_host)
-               __deactivate_traps(vcpu, running_vcpu);
-       else
-               __activate_traps(vcpu);
+       __restore_traps(vcpu);
 
        __hyp_vgic_restore_state(vcpu);
        __timer_restore_traps(vcpu);
@@ -260,7 +256,7 @@ void __noreturn hyp_panic(void)
 
        if (vcpu != host_vcpu) {
                __timer_restore_traps(host_vcpu);
-               __deactivate_traps(host_vcpu, vcpu);
+               __restore_traps(host_vcpu);
                __restore_stage2(host_vcpu);
                __sysreg_restore_state_nvhe(&host_vcpu->arch.ctxt);
        }
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index bc372629e1c1..bc5939581f61 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -67,7 +67,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
        extern char vectors[];  /* kernel exception vectors */
 
-       ___deactivate_traps(vcpu);
+       __save_traps(vcpu);
 
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 
-- 
2.27.0.389.gc38d7665816-goog

_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to