The "first run" part of the vgic init is pretty cumbersome, as
it leaks all over the place. Reduce its footprint by moving it
to an actual per-vcpu "first run" callback, and let it deal
with the resource mapping.

This allows the vgic_ready() macro to be made vgic-private, and the
readiness check (together with setting dist->ready) to move into the
common vgic code instead of being duplicated in the GICv2 and GICv3
backends.
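
As an illustration only, here is a minimal standalone sketch of the
resulting dispatch shape. The types and the 'flow' back-pointer below
are simplified stand-ins, not the actual kernel layout; the real code
goes through struct kvm_irqchip_flow and the __vcpu_irqchip_action_ret()
helper:

  #include <stdbool.h>
  #include <stddef.h>

  struct kvm_vcpu;

  /* per-irqchip callbacks; only the new first-run hook is shown */
  struct kvm_irqchip_flow {
          int (*irqchip_vcpu_first_run)(struct kvm_vcpu *);
  };

  /* simplified vcpu; 'flow' stands in for the real indirection */
  struct kvm_vcpu {
          bool has_run_once;
          const struct kvm_irqchip_flow *flow;
  };

  /* vgic side: maps the resources once; all the checks stay in here */
  static int kvm_vgic_vcpu_first_run(struct kvm_vcpu *vcpu)
  {
          /* if (vgic_ready(kvm)) return 0; map resources; dist->ready = true; */
          return 0;
  }

  static const struct kvm_irqchip_flow vgic_irqchip_flow = {
          .irqchip_vcpu_first_run = kvm_vgic_vcpu_first_run,
  };

  /* generic side: no vgic knowledge left, just an optional callback */
  static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
  {
          if (vcpu->has_run_once)
                  return 0;
          vcpu->has_run_once = true;

          if (vcpu->flow && vcpu->flow->irqchip_vcpu_first_run)
                  return vcpu->flow->irqchip_vcpu_first_run(vcpu);

          return 0;
  }

  int main(void)
  {
          struct kvm_vcpu vcpu = { .flow = &vgic_irqchip_flow };

          return kvm_vcpu_first_run_init(&vcpu);
  }

The point is that the generic first-run path only sees an optional
callback; whether the vgic still needs its resources mapped stays
entirely inside the vgic code.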

Signed-off-by: Marc Zyngier <[email protected]>
---
 arch/arm64/include/asm/kvm_irq.h |  4 ++++
 arch/arm64/kvm/arm.c             | 12 +++---------
 arch/arm64/kvm/vgic/vgic-init.c  | 13 ++++++++++---
 arch/arm64/kvm/vgic/vgic-v2.c    |  5 -----
 arch/arm64/kvm/vgic/vgic-v3.c    |  4 ----
 arch/arm64/kvm/vgic/vgic.h       |  2 ++
 include/kvm/arm_vgic.h           |  2 --
 7 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_irq.h b/arch/arm64/include/asm/kvm_irq.h
index e7a244176ade..7d888f10aabe 100644
--- a/arch/arm64/include/asm/kvm_irq.h
+++ b/arch/arm64/include/asm/kvm_irq.h
@@ -25,6 +25,7 @@ struct kvm_irqchip_flow {
        void (*irqchip_vcpu_load)(struct kvm_vcpu *);
        void (*irqchip_vcpu_put)(struct kvm_vcpu *);
        int  (*irqchip_vcpu_pending_irq)(struct kvm_vcpu *);
+       int  (*irqchip_vcpu_first_run)(struct kvm_vcpu *);
 };
 
 /*
@@ -74,4 +75,7 @@ struct kvm_irqchip_flow {
 #define kvm_irqchip_vcpu_pending_irq(v)                        \
        __vcpu_irqchip_action_ret((v), vcpu_pending_irq, (v))
 
+#define kvm_irqchip_vcpu_first_run(v)                  \
+       __vcpu_irqchip_action_ret((v), vcpu_first_run, (v))
+
 #endif
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3496d200e488..0db71d2a38a4 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -503,15 +503,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
        vcpu->arch.has_run_once = true;
 
        if (likely(irqchip_in_kernel(kvm))) {
-               /*
-                * Map the VGIC hardware resources before running a vcpu the
-                * first time on this VM.
-                */
-               if (unlikely(!vgic_ready(kvm))) {
-                       ret = kvm_vgic_map_resources(kvm);
-                       if (ret)
-                               return ret;
-               }
+               ret = kvm_irqchip_vcpu_first_run(vcpu);
+               if (ret)
+                       return ret;
        } else {
                /*
                 * Tell the rest of the code that there are userspace irqchip
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 8bb847045ef9..8ec8064467a7 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -12,6 +12,7 @@
 #include <asm/kvm_mmu.h>
 #include "vgic.h"
 
+static int kvm_vgic_vcpu_first_run(struct kvm_vcpu *vcpu);
 static int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
 static void kvm_vgic_destroy(struct kvm *kvm);
 
@@ -23,6 +24,7 @@ static struct kvm_irqchip_flow vgic_irqchip_flow = {
        .irqchip_vcpu_load              = kvm_vgic_load,
        .irqchip_vcpu_put               = kvm_vgic_put,
        .irqchip_vcpu_pending_irq       = kvm_vgic_vcpu_pending_irq,
+       .irqchip_vcpu_first_run         = kvm_vgic_vcpu_first_run,
 };
 
 /*
@@ -440,14 +442,17 @@ int vgic_lazy_init(struct kvm *kvm)
  * Also map the virtual CPU interface into the VM.
  * v2/v3 derivatives call vgic_init if not already done.
  * vgic_ready() returns true if this function has succeeded.
- * @kvm: kvm struct pointer
+ * @vcpu: vcpu struct pointer
  */
-int kvm_vgic_map_resources(struct kvm *kvm)
+static int kvm_vgic_vcpu_first_run(struct kvm_vcpu *vcpu)
 {
+       struct kvm *kvm = vcpu->kvm;
+       struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
 
        mutex_lock(&kvm->lock);
-       if (!irqchip_in_kernel(kvm))
+
+       if (vgic_ready(kvm))
                goto out;
 
        if (irqchip_is_gic_v2(kvm))
@@ -457,6 +462,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 
        if (ret)
                __kvm_vgic_destroy(kvm);
+       else
+               dist->ready = true;
 
 out:
        mutex_unlock(&kvm->lock);
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ebf53a4e1296..a6aaffd2124f 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -306,9 +306,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
 
-       if (vgic_ready(kvm))
-               goto out;
-
        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
@@ -348,8 +345,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
                }
        }
 
-       dist->ready = true;
-
 out:
        return ret;
 }
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index c6fdb1222453..d176ad9bab85 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -496,9 +496,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
        int ret = 0;
        int c;
 
-       if (vgic_ready(kvm))
-               goto out;
-
        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
@@ -538,7 +535,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 
        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);
-       dist->ready = true;
 
 out:
        return ret;
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index c5511823eec5..48e9efda9d8b 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -98,6 +98,8 @@
 #define DEBUG_SPINLOCK_BUG_ON(p)
 #endif
 
+#define vgic_ready(k)          ((k)->arch.vgic.ready)
+
 /* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b2adf9cca334..fad523007e2b 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -336,7 +336,6 @@ extern struct static_key_false vgic_v3_cpuif_trap;
 
 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
 int kvm_vgic_create(struct kvm *kvm, u32 type);
-int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_hyp_init(void);
 void kvm_vgic_init_cpu_hardware(void);
 
@@ -348,7 +347,6 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 
 #define vgic_initialized(k)    ((k)->arch.vgic.initialized)
-#define vgic_ready(k)          ((k)->arch.vgic.ready)
 #define vgic_valid_spi(k, i)   (((i) >= VGIC_NR_PRIVATE_IRQS) && \
                        ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
 
-- 
2.27.0
