We currently assume that an exception is delivered to EL1, always.
Once we emulate EL2, this will no longer be the case. To prepare
for this, add a target_mode parameter.

While we're at it, merge the computation of the target PC and PSTATE into
a single function that updates both PC and CPSR after saving their
previous values in the corresponding ELR/SPSR. This ensures that they
are updated in the correct order (a pretty common source of bugs...).
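[Editor's note, not part of the patch: a minimal standalone sketch of the
ordering the merged helper relies on, written as plain userspace C with
made-up names and values. The point is only that the old PC/PSTATE must be
stashed into ELR/SPSR before either is overwritten; the real code does this
through vcpu_write_sys_reg()/vcpu_write_spsr().]

#include <stdio.h>
#include <stdint.h>

struct fake_vcpu {
	uint64_t pc, cpsr;	/* current execution state */
	uint64_t elr, spsr;	/* exception return state */
};

/* Stash the old values first, only then overwrite PC and PSTATE. */
static void fake_enter_exception(struct fake_vcpu *v, uint64_t target_mode,
				 uint64_t vector)
{
	v->elr  = v->pc;
	v->spsr = v->cpsr;
	v->pc   = vector;
	v->cpsr = target_mode;
}

int main(void)
{
	struct fake_vcpu v = { .pc = 0x1000, .cpsr = 0x5 };

	fake_enter_exception(&v, 0x3c5, 0x8200);
	printf("ELR=%#llx SPSR=%#llx\n",
	       (unsigned long long)v.elr, (unsigned long long)v.spsr);
	return 0;
}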

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/kvm/inject_fault.c | 75 ++++++++++++++++++-----------------
 1 file changed, 38 insertions(+), 37 deletions(-)
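[Editor's note, not part of the patch: a small userspace sketch of what the
new target_mode parameter selects, assuming the standard arm64 PSR_MODE_*
encodings (EL0t=0x0, EL1t=0x4, EL1h=0x5, PSR_MODE32_BIT=0x10). It mirrors
the offset-selection chain added below for a target of EL1h; the constants
here are local stand-ins, not the kernel definitions.]

#include <stdio.h>

#define PSR_MODE_EL0t	0x0
#define PSR_MODE_EL1t	0x4
#define PSR_MODE_EL1h	0x5
#define PSR_MODE32_BIT	0x10

/* Same comparison chain as the new helper, returning the vector name. */
static const char *vector_for(unsigned int mode, unsigned int target_mode)
{
	if (mode == target_mode)
		return "CURRENT_EL_SP_ELx_VECTOR";	/* same EL, using SP_ELx */
	else if ((mode | 1) == target_mode)
		return "CURRENT_EL_SP_EL0_VECTOR";	/* same EL, using SP_EL0 */
	else if (!(mode & PSR_MODE32_BIT))
		return "LOWER_EL_AArch64_VECTOR";	/* lower EL, AArch64 */
	else
		return "LOWER_EL_AArch32_VECTOR";	/* lower EL, AArch32 */
}

int main(void)
{
	printf("from EL1h: %s\n", vector_for(PSR_MODE_EL1h, PSR_MODE_EL1h));
	printf("from EL1t: %s\n", vector_for(PSR_MODE_EL1t, PSR_MODE_EL1h));
	printf("from EL0t: %s\n", vector_for(PSR_MODE_EL0t, PSR_MODE_EL1h));
	return 0;
}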

diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index d3ebf8bca4b89..3dbcbc839b9c3 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -26,28 +26,12 @@ enum exception_type {
        except_type_serror      = 0x180,
 };
 
-static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
-{
-       u64 exc_offset;
-
-       switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
-       case PSR_MODE_EL1t:
-               exc_offset = CURRENT_EL_SP_EL0_VECTOR;
-               break;
-       case PSR_MODE_EL1h:
-               exc_offset = CURRENT_EL_SP_ELx_VECTOR;
-               break;
-       case PSR_MODE_EL0t:
-               exc_offset = LOWER_EL_AArch64_VECTOR;
-               break;
-       default:
-               exc_offset = LOWER_EL_AArch32_VECTOR;
-       }
-
-       return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
-}
-
 /*
+ * This performs the exception entry at a given EL (@target_mode), stashing PC
+ * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
+ * The EL passed to this function *must* be a non-secure, privileged mode with
+ * bit 0 being set (PSTATE.SP == 1).
+ *
  * When an exception is taken, most PSTATE fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
  * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
  * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
  * MSB to LSB.
  */
-static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+static void enter_exception(struct kvm_vcpu *vcpu, unsigned long target_mode,
+                           enum exception_type type)
 {
-       unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
-       unsigned long old, new;
+       unsigned long sctlr, vbar, old, new, mode;
+       u64 exc_offset;
+
+       mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+       if      (mode == target_mode)
+               exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+       else if ((mode | 1) == target_mode)
+               exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+       else if (!(mode & PSR_MODE32_BIT))
+               exc_offset = LOWER_EL_AArch64_VECTOR;
+       else
+               exc_offset = LOWER_EL_AArch32_VECTOR;
+
+       switch (target_mode) {
+       case PSR_MODE_EL1h:
+               vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
+               sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+               vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
+               break;
+       default:
+               /* Don't do that */
+               BUG();
+       }
+
+       *vcpu_pc(vcpu) = vbar + exc_offset + type;
 
        old = *vcpu_cpsr(vcpu);
        new = 0;
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
        new |= PSR_I_BIT;
        new |= PSR_F_BIT;
 
-       new |= PSR_MODE_EL1h;
+       new |= target_mode;
 
-       return new;
+       *vcpu_cpsr(vcpu) = new;
+       vcpu_write_spsr(vcpu, old);
 }
 
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
        bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
        u32 esr = 0;
 
-       vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
-       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-       *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-       vcpu_write_spsr(vcpu, cpsr);
+       enter_exception(vcpu, PSR_MODE_EL1h, except_type_sync);
 
        vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 
@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-       unsigned long cpsr = *vcpu_cpsr(vcpu);
        u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
-       vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
-       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-       *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-       vcpu_write_spsr(vcpu, cpsr);
+       enter_exception(vcpu, PSR_MODE_EL1h, except_type_sync);
 
        /*
         * Build an unknown exception, depending on the instruction
-- 
2.26.1
