If the guest context is loaded when a panic is triggered, restore the
hyp context so that, e.g., the shadow call stack works when hyp_panic()
is called and SP_EL0 is valid when the host's panic() is called.

Use the hyp context's __hyp_running_vcpu field to track when hyp
transitions to and from the guest vcpu so the exception handlers know
whether the context needs to be restored.

Signed-off-by: Andrew Scull <[email protected]>
---
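Reviewer note: a minimal C model of the tracking scheme this patch
implements. Everything in it is illustrative: hyp_ctxt, enter_guest(),
exit_guest() and on_panic() are hypothetical stand-ins for the per-CPU
hyp context, __guest_enter, __guest_exit and the panic entry points;
the real logic is the assembly below.

/* Sketch only; compile with: cc -o model model.c */
#include <stdio.h>
#include <stddef.h>

struct vcpu { int id; };

struct hyp_context {
        struct vcpu *running_vcpu;      /* models __hyp_running_vcpu */
};

static struct hyp_context hyp_ctxt;     /* one instance per CPU in hyp */

static void enter_guest(struct vcpu *vcpu)
{
        hyp_ctxt.running_vcpu = vcpu;   /* set_loaded_vcpu in __guest_enter */
        /* ... restore guest registers, eret to the guest ... */
}

static void exit_guest(void)
{
        /* ... save guest registers, restore hyp registers ... */
        hyp_ctxt.running_vcpu = NULL;   /* set_loaded_vcpu xzr in __guest_exit */
}

static void on_panic(void)
{
        if (hyp_ctxt.running_vcpu)      /* get_loaded_vcpu */
                exit_guest();           /* __guest_exit_panic: restore hyp first */
        printf("hyp_panic() with the hyp context loaded\n");
}

int main(void)
{
        struct vcpu v = { .id = 0 };

        on_panic();             /* panic from hyp: nothing to restore */
        enter_guest(&v);
        on_panic();             /* panic from the guest: restore, then panic */
        return 0;
}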
 arch/arm64/include/asm/kvm_asm.h | 10 ++++++++++
 arch/arm64/kvm/hyp/entry.S       | 24 ++++++++++++++++++++++++
 arch/arm64/kvm/hyp/hyp-entry.S   | 12 +++++-------
 arch/arm64/kvm/hyp/nvhe/host.S   |  5 +++++
 4 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 81f29a2c361a..106279a6d027 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -198,6 +198,16 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+.macro get_loaded_vcpu vcpu, ctxt
+       hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+       ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
+.macro set_loaded_vcpu vcpu, ctxt, tmp
+       hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+       str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
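A note on the register contract of the two new macros, with a rough C
analogue (the per-CPU indexing below is a hypothetical stand-in for
hyp_adr_this_cpu): get_loaded_vcpu reuses \vcpu as the temporary for
the address computation and then overwrites it with the load, so \ctxt
ends up pointing at the hyp context; set_loaded_vcpu needs the extra
\tmp because \vcpu must be preserved for the store.

#include <stddef.h>

#define NR_CPUS 4

struct kvm_vcpu;                        /* opaque for this sketch */

struct kvm_cpu_context {
        struct kvm_vcpu *__hyp_running_vcpu;
        /* ... saved registers ... */
};

static struct kvm_cpu_context kvm_hyp_ctxt[NR_CPUS];
static int this_cpu;                    /* hyp derives this via TPIDR_EL2 */

/* get_loaded_vcpu \vcpu, \ctxt: return the vcpu, ctxt points at the
 * hyp context afterwards. */
static struct kvm_vcpu *get_loaded_vcpu(struct kvm_cpu_context **ctxt)
{
        *ctxt = &kvm_hyp_ctxt[this_cpu];
        return (*ctxt)->__hyp_running_vcpu;
}

/* set_loaded_vcpu \vcpu, \ctxt, \tmp: vcpu must survive the address
 * computation, hence the extra temporary in the asm version. */
static void set_loaded_vcpu(struct kvm_vcpu *vcpu,
                            struct kvm_cpu_context **ctxt)
{
        *ctxt = &kvm_hyp_ctxt[this_cpu];
        (*ctxt)->__hyp_running_vcpu = vcpu;
}

int main(void)
{
        struct kvm_cpu_context *ctxt;

        set_loaded_vcpu(NULL, &ctxt);
        return get_loaded_vcpu(&ctxt) != NULL;
}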
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index e95cd7f2ff86..872ac89ffc1e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -86,6 +86,8 @@ alternative_else_nop_endif
        ret
 
 1:
+       set_loaded_vcpu x0, x1, x2
+
        add     x29, x0, #VCPU_CONTEXT
 
        // Macro ptrauth_switch_to_guest format:
@@ -116,6 +118,26 @@ alternative_else_nop_endif
        eret
        sb
 
+SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
+       // x2-x29,lr: vcpu regs
+       // vcpu x0-x1 on the stack
+
+       // If the hyp context is loaded, go straight to hyp_panic
+       get_loaded_vcpu x0, x1
+       cbz     x0, hyp_panic
+
+       // The hyp context is saved so make sure it is restored to allow
+       // hyp_panic to run at hyp and, subsequently, panic to run in the host.
+       // This makes use of __guest_exit to avoid duplication but sets the
+       // return address to tail call into hyp_panic. As a side effect, the
+       // current state is saved to the guest context but it will only be
+       // accurate if the guest had been completely restored.
+       hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+       adr     x1, hyp_panic
+       str     x1, [x0, #CPU_XREG_OFFSET(30)]
+
+       get_vcpu_ptr    x1, x0
+
 SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // x0: return code
        // x1: vcpu
@@ -163,6 +185,8 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // Now restore the hyp regs
        restore_callee_saved_regs x2
 
+       set_loaded_vcpu xzr, x1, x2
+
 alternative_if ARM64_HAS_RAS_EXTN
        // If we have the RAS extensions we can consume a pending error
        // without an unmask-SError and isb. The ESB-instruction consumed any
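The trick in __guest_exit_panic is to patch the hyp context's saved x30
before falling through into __guest_exit: when the hyp callee-saved
registers are restored, the final ret then lands in hyp_panic on the
restored context. A sketch of that control flow in C, with the saved
link register modelled as a hypothetical function pointer:

#include <stdio.h>

/* Hypothetical model: only the saved return address matters here. */
struct hyp_regs {
        void (*lr)(void);               /* models CPU_XREG_OFFSET(30) */
};

static struct hyp_regs hyp_saved;

static void hyp_panic(void)
{
        printf("hyp_panic on the restored hyp context\n");
}

static void hyp_caller(void)
{
        printf("__guest_exit returns to __guest_enter's caller\n");
}

/* Common tail: restore hyp registers, then `ret` through the saved lr. */
static void guest_exit(void)
{
        /* ... save guest regs to the guest context ... */
        /* ... restore hyp callee-saved regs, clear loaded vcpu ... */
        hyp_saved.lr();
}

static void guest_exit_panic(void)
{
        hyp_saved.lr = hyp_panic;       /* str x1, [x0, #CPU_XREG_OFFSET(30)] */
        guest_exit();                   /* falls through, as in the asm */
}

int main(void)
{
        hyp_saved.lr = hyp_caller;      /* set up on __guest_enter */
        guest_exit();                   /* ordinary exit */

        hyp_saved.lr = hyp_caller;
        guest_exit_panic();             /* panic while a guest is loaded */
        return 0;
}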
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 6e14873680a8..b7a1ea221f6e 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -96,7 +96,7 @@ el2_sync:
 
        /* if this was something else, then panic! */
        tst     x0, #PSR_IL_BIT
-       b.eq    hyp_panic
+       b.eq    __guest_exit_panic
 
        /* Let's attempt a recovery from the illegal exception return */
        get_vcpu_ptr    x1, x0
@@ -105,8 +105,6 @@ el2_sync:
 
 
 el2_error:
-       ldp     x0, x1, [sp], #16
-
        /*
         * Only two possibilities:
         * 1) Either we come from the exit path, having just unmasked
@@ -124,12 +122,13 @@ el2_error:
        cmp     x0, x1
        adr     x1, abort_guest_exit_end
        ccmp    x0, x1, #4, ne
-       b.ne    hyp_panic
+       b.ne    __guest_exit_panic
+       ldp     x0, x1, [sp], #16
        mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
        eret
        sb
 
-.macro invalid_vector  label, target = hyp_panic
+.macro invalid_vector  label, target = __guest_exit_panic
        .align  2
 SYM_CODE_START(\label)
        b \target
@@ -170,10 +169,9 @@ check_preamble_length 661b, 662b
 .macro invalid_vect target
        .align 7
 661:
-       b       \target
        nop
+       stp     x0, x1, [sp, #-16]!
 662:
-       ldp     x0, x1, [sp], #16
        b       \target
 
 check_preamble_length 661b, 662b
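Both the el2_error change and the invalid_vect change preserve the same
invariant: the guest's x0/x1, stashed by the vector preamble, must
still be on the stack if __guest_exit_panic runs, since __guest_exit
pops them to finish saving the guest registers. Hence the ldp moves
after the branch decision, and the invalid vectors now stash instead of
popping. A toy model of that ordering (all names hypothetical):

#include <stdio.h>
#include <stdbool.h>

static unsigned long stash[2];          /* models the stp in the preamble */
static bool stashed;

static void vector_preamble(unsigned long x0, unsigned long x1)
{
        stash[0] = x0;                  /* stp x0, x1, [sp, #-16]! */
        stash[1] = x1;
        stashed = true;
}

static void guest_exit_panic(void)
{
        /* __guest_exit pops the stash as the guest's x0/x1 */
        printf("panic: guest x0=%lu x1=%lu taken from the stack\n",
               stash[0], stash[1]);
        stashed = false;
}

static void el2_error(bool from_exit_path)
{
        if (!from_exit_path) {
                guest_exit_panic();     /* stash still in place */
                return;
        }
        stashed = false;                /* ldp x0, x1, [sp], #16 */
        printf("SError taken on the exit path: report and eret\n");
}

int main(void)
{
        vector_preamble(1, 2);
        el2_error(true);                /* recoverable case */

        vector_preamble(3, 4);
        el2_error(false);               /* panic case */
        return 0;
}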
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 5a7380c342c8..d4e8b8084020 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -72,6 +72,11 @@ SYM_FUNC_END(__hyp_do_panic)
 
 .macro invalid_host_vect
        .align 7
+       /* If a guest is loaded, panic out of it. */
+       stp     x0, x1, [sp, #-16]!
+       get_loaded_vcpu x0, x1
+       cbnz    x0, __guest_exit_panic
+       add     sp, sp, #16
        b       hyp_panic
 .endm
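Here the stash does double duty: it frees x0/x1 as scratch for
get_loaded_vcpu, and if a guest turns out to be loaded it becomes the
guest's x0/x1 for __guest_exit_panic; otherwise it is discarded again
(the add sp, sp, #16) before panicking on the already-loaded hyp
context. Roughly, in C (hypothetical names again):

#include <stdio.h>
#include <stdbool.h>

static bool guest_is_loaded;            /* get_loaded_vcpu != NULL */

static void invalid_host_vect(void)
{
        /* stp x0, x1, [sp, #-16]! -- scratch for the probe below */
        if (guest_is_loaded) {
                /* cbnz x0, __guest_exit_panic: stash becomes guest x0/x1 */
                printf("restore the hyp context, then hyp_panic\n");
                return;
        }
        /* add sp, sp, #16 -- nothing to restore, drop the stash */
        printf("hyp_panic directly\n");
}

int main(void)
{
        invalid_host_vect();            /* hyp context loaded */
        guest_is_loaded = true;
        invalid_host_vect();            /* guest context loaded */
        return 0;
}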
 
-- 
2.28.0.220.ged08abb693-goog
