GICv4 needs to know which VCPU is currently scheduled to be able to
deliver VLPIs. Implement switching of VPEs on VCPU context switch by
extending the existing save/restore mechanism used for GICv2 and GICv3.

Scheduling a VPE is done by setting up the VPENDBASER and VPROPBASER
registers to the appropriate tables for the currently running VCPU. When
scheduling out, preserve the IDAI and PendingLast bits from VPENDBASER.

Signed-off-by: Mykyta Poturai <[email protected]>
---
 xen/arch/arm/gic-v2.c                  |   2 +-
 xen/arch/arm/gic-v3.c                  |   9 +-
 xen/arch/arm/gic-v4-its.c              | 138 +++++++++++++++++++++++++
 xen/arch/arm/gic-vgic.c                |   6 ++
 xen/arch/arm/include/asm/gic.h         |   2 +-
 xen/arch/arm/include/asm/gic_v3_defs.h |   6 ++
 xen/arch/arm/include/asm/gic_v4_its.h  |   9 ++
 xen/arch/arm/include/asm/vgic.h        |   2 +
 8 files changed, 170 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index 0cd41eac12..c16fa5d67e 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -181,7 +181,7 @@ static void gicv2_save_state(struct vcpu *v)
     writel_gich(0, GICH_HCR);
 }
 
-static void gicv2_restore_state(const struct vcpu *v)
+static void gicv2_restore_state(struct vcpu *v)
 {
     int i;
 
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index d4af332b0e..07736179db 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -133,7 +133,7 @@ bool gic_is_gicv4(void)
 #endif
 
 /* per-cpu re-distributor base */
-static DEFINE_PER_CPU(void __iomem*, rbase);
+DEFINE_PER_CPU(void __iomem*, rbase);
 
 #define GICD                   (gicv3.map_dbase)
 #define GICD_RDIST_BASE        (this_cpu(rbase))
@@ -475,13 +475,15 @@ static void gicv3_save_state(struct vcpu *v)
      * are now visible to the system register interface
      */
     dsb(sy);
+    if ( gic_is_gicv4() )
+        vgic_v4_put(v, false);
     gicv3_save_lrs(v);
     save_aprn_regs(&v->arch.gic);
     v->arch.gic.v3.vmcr = READ_SYSREG(ICH_VMCR_EL2);
     v->arch.gic.v3.sre_el1 = READ_SYSREG(ICC_SRE_EL1);
 }
 
-static void gicv3_restore_state(const struct vcpu *v)
+static void gicv3_restore_state(struct vcpu *v)
 {
     register_t val;
 
@@ -510,6 +512,9 @@ static void gicv3_restore_state(const struct vcpu *v)
     restore_aprn_regs(&v->arch.gic);
     gicv3_restore_lrs(v);
 
+    if ( gic_is_gicv4() )
+        vgic_v4_load(v);
+
     /*
      * Make sure all stores are visible the GIC
      */
diff --git a/xen/arch/arm/gic-v4-its.c b/xen/arch/arm/gic-v4-its.c
index fac3b44a94..6a550a65b2 100644
--- a/xen/arch/arm/gic-v4-its.c
+++ b/xen/arch/arm/gic-v4-its.c
@@ -18,6 +18,7 @@
  * GNU General Public License for more details.
  */
 
+#include <xen/delay.h>
 #include <xen/errno.h>
 #include <xen/sched.h>
 #include <xen/spinlock.h>
@@ -44,6 +45,21 @@ void __init gicv4_its_vpeid_allocator_init(void)
         panic("Could not allocate VPEID bitmap space\n");
 }
 
+static void __iomem *gic_data_rdist_vlpi_base(unsigned int cpu)
+{
+    /*
+     * Each Redistributor defines two 64KB frames in the physical address map.
+     * In GICv4, there are two additional 64KB frames.
+     * The frames for each Redistributor must be contiguous and must be
+     * ordered as follows:
+     * 1. RD_base
+     * 2. SGI_base
+     * 3. VLPI_base
+     * 4. Reserved
+     */
+    return GICD_RDIST_BASE_CPU(cpu) + SZ_128K;
+}
+
 static int __init its_alloc_vpeid(struct its_vpe *vpe)
 {
     int id;
@@ -571,3 +587,125 @@ int its_send_cmd_vinv(struct host_its *its, struct its_device *dev,
 
     return gicv3_its_wait_commands(its);
 }
+
+static uint64_t read_vpend_dirty_clean(void __iomem *vlpi_base,
+                                       unsigned int count)
+{
+    uint64_t val;
+    bool clean;
+
+    do {
+        val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+        /* Poll GICR_VPENDBASER.Dirty until it reads 0. */
+        clean = !(val & GICR_VPENDBASER_Dirty);
+        if ( !clean )
+        {
+            count--;
+            cpu_relax();
+            udelay(1);
+        }
+    } while ( !clean && count );
+
+    if ( !clean )
+    {
+        printk(XENLOG_WARNING "ITS virtual pending table not totally parsed\n");
+        val |= GICR_VPENDBASER_PendingLast;
+    }
+
+    return val;
+}
+
+/*
+ * When a vPE is made resident, the GIC starts parsing the virtual pending
+ * table to deliver pending interrupts. This takes place asynchronously,
+ * and can at times take a long while.
+ */
+static void its_wait_vpt_parse_complete(void __iomem *vlpi_base)
+{
+    if ( !gic_support_vptValidDirty() )
+        return;
+
+    read_vpend_dirty_clean(vlpi_base, 500);
+}
+
+static uint64_t its_clear_vpend_valid(void __iomem *vlpi_base, uint64_t clr,
+                                      uint64_t set)
+{
+    unsigned int count = 1000000;    /* 1s! */
+    uint64_t val;
+
+    /*
+     * Clearing the Valid bit informs the Redistributor that a context
+     * switch is taking place.
+     */
+    val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+    val &= ~GICR_VPENDBASER_Valid;
+    val &= ~clr;
+    val |= set;
+    gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+    return read_vpend_dirty_clean(vlpi_base, count);
+}
+
+static void its_make_vpe_resident(struct its_vpe *vpe, unsigned int cpu)
+{
+    void __iomem *vlpi_base = gic_data_rdist_vlpi_base(cpu);
+    uint64_t val;
+
+    /* Switch in this VM's virtual property table. */
+    val  = virt_to_maddr(vpe->its_vm->vproptable) & GENMASK(51, 12);
+    val |= gicv3_its_get_cacheability() << GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT;
+    val |= gicv3_its_get_shareability() << GICR_VPROPBASER_SHAREABILITY_SHIFT;
+    val |= GIC_BASER_CACHE_SameAsInner << GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT;
+    val |= (HOST_LPIS_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+    gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+    /* Switch in this VCPU's VPT. */
+    val  = virt_to_maddr(vpe->vpendtable) & GENMASK(51, 16);
+    val |= gicv3_its_get_cacheability() << GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT;
+    val |= gicv3_its_get_shareability() << GICR_VPENDBASER_SHAREABILITY_SHIFT;
+    val |= GIC_BASER_CACHE_SameAsInner << GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT;
+    /*
+     * When the GICR_VPENDBASER.Valid bit is written from 0 to 1,
+     * this bit is RES1.
+     */
+    val |= GICR_VPENDBASER_PendingLast;
+    val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
+    val |= GICR_VPENDBASER_Valid;
+    gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+    its_wait_vpt_parse_complete(vlpi_base);
+}
+
+static void its_make_vpe_non_resident(struct its_vpe *vpe, unsigned int cpu)
+{
+    void __iomem *vlpi_base = gic_data_rdist_vlpi_base(cpu);
+    uint64_t val;
+
+    val = its_clear_vpend_valid(vlpi_base, 0, 0);
+    vpe->idai = val & GICR_VPENDBASER_IDAI;
+    vpe->pending_last = val & GICR_VPENDBASER_PendingLast;
+}
+
+void vgic_v4_load(struct vcpu *vcpu)
+{
+    struct its_vpe *vpe = vcpu->arch.vgic.its_vpe;
+
+
+    if ( vpe->resident )
+        return;
+
+    its_make_vpe_resident(vpe, vcpu->processor);
+    vpe->resident = true;
+}
+
+void vgic_v4_put(struct vcpu *vcpu, bool need_db)
+{
+    struct its_vpe *vpe = vcpu->arch.vgic.its_vpe;
+
+    if ( !vpe->resident )
+        return;
+
+    its_make_vpe_non_resident(vpe, vcpu->processor);
+    vpe->resident = false;
+}
diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
index ea48c5375a..44db142dbd 100644
--- a/xen/arch/arm/gic-vgic.c
+++ b/xen/arch/arm/gic-vgic.c
@@ -377,6 +377,12 @@ int vgic_vcpu_pending_irq(struct vcpu *v)
         }
     }
 
+#ifdef CONFIG_GICV4
+    if ( gic_is_gicv4() )
+        if ( v->arch.vgic.its_vpe->pending_last )
+            rc = 1;
+#endif
+
 out:
     spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
     return rc;
diff --git a/xen/arch/arm/include/asm/gic.h b/xen/arch/arm/include/asm/gic.h
index afb1cc3751..04a20bdca5 100644
--- a/xen/arch/arm/include/asm/gic.h
+++ b/xen/arch/arm/include/asm/gic.h
@@ -362,7 +362,7 @@ struct gic_hw_operations {
     /* Save GIC registers */
     void (*save_state)(struct vcpu *v);
     /* Restore GIC registers */
-    void (*restore_state)(const struct vcpu *v);
+    void (*restore_state)(struct vcpu *v);
     /* Dump GIC LR register information */
     void (*dump_state)(const struct vcpu *v);
 
diff --git a/xen/arch/arm/include/asm/gic_v3_defs.h b/xen/arch/arm/include/asm/gic_v3_defs.h
index 3a7d18ef59..0db75309cf 100644
--- a/xen/arch/arm/include/asm/gic_v3_defs.h
+++ b/xen/arch/arm/include/asm/gic_v3_defs.h
@@ -257,6 +257,12 @@ struct rdist_region {
     bool single_rdist;
 };
 
+/* per-cpu re-distributor base */
+DECLARE_PER_CPU(void __iomem*, rbase);
+
+#define GICD_RDIST_BASE             (this_cpu(rbase))
+#define GICD_RDIST_BASE_CPU(cpu)    (per_cpu(rbase, cpu))
+
 #endif /* __ASM_ARM_GIC_V3_DEFS_H__ */
 
 /*
diff --git a/xen/arch/arm/include/asm/gic_v4_its.h b/xen/arch/arm/include/asm/gic_v4_its.h
index ba81b25bde..37b6b92f0c 100644
--- a/xen/arch/arm/include/asm/gic_v4_its.h
+++ b/xen/arch/arm/include/asm/gic_v4_its.h
@@ -56,6 +56,15 @@ void gicv4_its_vpeid_allocator_init(void);
 #define GICR_VPROPBASER                              0x0070
 #define GICR_VPENDBASER                              0x0078
 
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT         56
+#define GICR_VPROPBASER_SHAREABILITY_SHIFT               10
+#define GICR_VPROPBASER_SHAREABILITY_MASK                \
+        (3UL << GICR_VPROPBASER_SHAREABILITY_SHIFT)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT          7
+#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK           \
+        (7UL << GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT)
+#define GICR_VPROPBASER_IDBITS_MASK                    0x1f
+
 #define GICR_VPENDBASER_Dirty                   (1UL << 60)
 #define GICR_VPENDBASER_PendingLast             (1UL << 61)
 #define GICR_VPENDBASER_IDAI                    (1UL << 62)
diff --git a/xen/arch/arm/include/asm/vgic.h b/xen/arch/arm/include/asm/vgic.h
index 580310fec4..9ef667decb 100644
--- a/xen/arch/arm/include/asm/vgic.h
+++ b/xen/arch/arm/include/asm/vgic.h
@@ -417,6 +417,8 @@ bool gic_is_gicv4(void);
 int vgic_v4_its_vm_init(struct domain *d);
 void vgic_v4_free_its_vm(struct domain *d);
 int vgic_v4_its_vpe_init(struct vcpu *vcpu);
+void vgic_v4_load(struct vcpu *vcpu);
+void vgic_v4_put(struct vcpu *vcpu, bool need_db);
 #endif /* !CONFIG_NEW_VGIC */
 
 /*** Common VGIC functions used by Xen arch code ****/
-- 
2.51.2

Reply via email to