This supports the SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercalls.
They are used by the guest to notify completion of an event in its
handler. The previously interrupted or preempted context is restored
as described below.

   * x0 - x17, PC and PState are restored to the values they had in
     the interrupted or preempted context.

   * If it's the SDEI_EVENT_COMPLETE_AND_RESUME hypercall, an IRQ
     exception is injected.

Signed-off-by: Gavin Shan <[email protected]>
---
 arch/arm64/include/asm/kvm_emulate.h |  1 +
 arch/arm64/include/asm/kvm_host.h    |  1 +
 arch/arm64/kvm/hyp/exception.c       |  7 +++
 arch/arm64/kvm/inject_fault.c        | 29 ++++++++++
 arch/arm64/kvm/sdei.c                | 79 ++++++++++++++++++++++++++++
 5 files changed, 117 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_emulate.h 
b/arch/arm64/include/asm/kvm_emulate.h
index d62405ce3e6d..ca9de9f24923 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -37,6 +37,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_irq(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 951264d4b64d..ac475d3b9151 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -431,6 +431,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_EXCEPT_AA32_UND      (0 << 9)
 #define KVM_ARM64_EXCEPT_AA32_IABT     (1 << 9)
 #define KVM_ARM64_EXCEPT_AA32_DABT     (2 << 9)
+#define KVM_ARM64_EXCEPT_AA32_IRQ      (3 << 9)
 /* For AArch64: */
 #define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9)
 #define KVM_ARM64_EXCEPT_AA64_ELx_IRQ  (1 << 9)
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index c5d009715402..f425ea11e4f6 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -313,6 +313,9 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
                case KVM_ARM64_EXCEPT_AA32_DABT:
                        enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
                        break;
+               case KVM_ARM64_EXCEPT_AA32_IRQ:
+                       enter_exception32(vcpu, PSR_AA32_MODE_IRQ, 24);
+                       break;
                default:
                        /* Err... */
                        break;
@@ -323,6 +326,10 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
                      KVM_ARM64_EXCEPT_AA64_EL1):
                        enter_exception64(vcpu, PSR_MODE_EL1h, 
except_type_sync);
                        break;
+               case (KVM_ARM64_EXCEPT_AA64_ELx_IRQ |
+                     KVM_ARM64_EXCEPT_AA64_EL1):
+                       enter_exception64(vcpu, PSR_MODE_EL1h, except_type_irq);
+                       break;
                default:
                        /*
                         * Only EL1_SYNC makes sense so far, EL2_{SYNC,IRQ}
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index b47df73e98d7..c8a8791bdf28 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -66,6 +66,13 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
        vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 }
 
+static void inject_irq64(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1     |
+                            KVM_ARM64_EXCEPT_AA64_ELx_IRQ |
+                            KVM_ARM64_PENDING_EXCEPTION);
+}
+
 #define DFSR_FSC_EXTABT_LPAE   0x10
 #define DFSR_FSC_EXTABT_nLPAE  0x08
 #define DFSR_LPAE              BIT(9)
@@ -77,6 +84,12 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
                             KVM_ARM64_PENDING_EXCEPTION);
 }
 
+static void inject_irq32(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IRQ |
+                            KVM_ARM64_PENDING_EXCEPTION);
+}
+
 /*
  * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  * pseudocode.
@@ -160,6 +173,22 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
                inject_undef64(vcpu);
 }
 
+/**
+ * kvm_inject_irq - inject an IRQ into the guest
+ * @vcpu: The vCPU in which to inject IRQ
+ *
+ * Inject an IRQ into the target vCPU. It is assumed that this code is
+ * called from the VCPU thread and that the VCPU therefore is not
+ * currently executing guest code.
+ */
+void kvm_inject_irq(struct kvm_vcpu *vcpu)
+{
+       if (vcpu_el1_is_32bit(vcpu))
+               inject_irq32(vcpu);
+       else
+               inject_irq64(vcpu);
+}
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
 {
        vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 9d18fee59751..ebdbe7810cf0 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -243,6 +243,79 @@ static unsigned long hypercall_context(struct kvm_vcpu 
*vcpu)
        return ret;
 }
 
+static unsigned long hypercall_complete(struct kvm_vcpu *vcpu, bool resume)
+{
+       struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+       struct kvm_sdei_exposed_event *exposed_event;
+       struct kvm_sdei_event *event;
+       struct kvm_sdei_vcpu_context *context;
+       unsigned int i;
+       unsigned long ret = SDEI_SUCCESS;
+
+       spin_lock(&vsdei->lock);
+
+       /* Check if there is any event being handled */
+       context = &vsdei->context[SDEI_EVENT_PRIORITY_CRITICAL];
+       context = context->event ? context : NULL;
+       context = context ? : &vsdei->context[SDEI_EVENT_PRIORITY_NORMAL];
+       context = context->event ? context : NULL;
+       if (!context) {
+               ret = SDEI_DENIED;
+               goto unlock;
+       }
+
+       /* Restore registers: x0 -> x17, PC, PState */
+       for (i = 0; i < ARRAY_SIZE(context->regs); i++)
+               vcpu_set_reg(vcpu, i, context->regs[i]);
+
+       *vcpu_cpsr(vcpu) = context->pstate;
+       *vcpu_pc(vcpu) = context->pc;
+
+       /* Inject interrupt if needed */
+       if (resume)
+               kvm_inject_irq(vcpu);
+
+       /*
+        * Decrease the event count and invalidate the event in the
+        * vcpu context.
+        */
+       event = context->event;
+       exposed_event = event->exposed_event;
+       context->event = NULL;
+       event->event_count--;
+       if (kvm_sdei_is_critical(exposed_event->priority))
+               vsdei->critical_event_count--;
+       else
+               vsdei->normal_event_count--;
+
+       /*
+        * We need to check if the event is pending for unregistration.
+        * In that case, the event should be disabled and unregistered.
+        * All the pending events are cancelled as well.
+        */
+       if (kvm_sdei_is_unregister_pending(event)) {
+               if (kvm_sdei_is_critical(exposed_event->priority))
+                       vsdei->critical_event_count -= event->event_count;
+               else
+                       vsdei->normal_event_count -= event->event_count;
+
+               event->event_count = 0;
+               kvm_sdei_clear_enabled(event);
+               kvm_sdei_clear_registered(event);
+               kvm_sdei_clear_unregister_pending(event);
+       }
+
+       /* Another request if we have more events to be handled */
+       if (vsdei->critical_event_count > 0 ||
+           vsdei->normal_event_count > 0)
+               kvm_make_request(KVM_REQ_SDEI, vcpu);
+
+unlock:
+       spin_unlock(&vsdei->lock);
+
+       return ret;
+}
+
 static unsigned long hypercall_unregister(struct kvm_vcpu *vcpu)
 {
        struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
@@ -445,6 +518,12 @@ int kvm_sdei_call(struct kvm_vcpu *vcpu)
        case SDEI_1_0_FN_SDEI_EVENT_CONTEXT:
                ret = hypercall_context(vcpu);
                break;
+       case SDEI_1_0_FN_SDEI_EVENT_COMPLETE:
+               ret = hypercall_complete(vcpu, false);
+               break;
+       case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME:
+               ret = hypercall_complete(vcpu, true);
+               break;
        case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER:
                ret = hypercall_unregister(vcpu);
                break;
-- 
2.23.0

_______________________________________________
kvmarm mailing list
[email protected]
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to