Signed-off-by: Andre Przywara <[email protected]>
---
 virt/kvm/arm/vgic/vgic_mmio.c | 86 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 84 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic_mmio.c b/virt/kvm/arm/vgic/vgic_mmio.c
index e62366e..0688a69 100644
--- a/virt/kvm/arm/vgic/vgic_mmio.c
+++ b/virt/kvm/arm/vgic/vgic_mmio.c
@@ -129,15 +129,97 @@ static int vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+/*
+ * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the state
+ * of the enable bit for each interrupt, so one handler covers both registers.
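+ * A one-byte access at byte offset 1, for example, covers INTIDs 8 to 15.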
+ */
+static int vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
+                                struct kvm_io_device *this,
+                                gpa_t addr, int len, void *val)
+{
+       struct vgic_io_device *iodev = container_of(this,
+                                                   struct vgic_io_device, dev);
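+       /* One enable bit per IRQ: byte offset * 8 gives the first INTID. */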
+       u32 intid = (addr - iodev->base_addr) * 8;
+       u32 value = 0;
+       int i;
+
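+       /* Accesses through a redistributor region target that VCPU's IRQs. */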
+       if (iodev->redist_vcpu)
+               vcpu = iodev->redist_vcpu;
+
+       /* Loop over all IRQs affected by this read */
+       for (i = 0; i < len * 8; i++) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               if (irq->enabled)
+                       value |= (1U << i);
+       }
+
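+       /* Copy the accessed bytes of the 32-bit value into the MMIO buffer. */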
+       write_mask32(value, addr & 3, len, val);
+       return 0;
+}
+
+static int vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
+                                  struct kvm_io_device *this,
+                                  gpa_t addr, int len, const void *val)
+{
+       struct vgic_io_device *iodev = container_of(this,
+                                                   struct vgic_io_device, dev);
+       u32 intid = (addr - iodev->base_addr) * 8;
+       int i;
+
+       if (iodev->redist_vcpu)
+               vcpu = iodev->redist_vcpu;
+
+       for_each_set_bit(i, val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               spin_lock(&irq->irq_lock);
+               irq->enabled = true;
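+               /* vgic_queue_irq() is expected to drop the IRQ lock. */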
+               vgic_queue_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+static int vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
+                                  struct kvm_io_device *this,
+                                  gpa_t addr, int len, const void *val)
+{
+       struct vgic_io_device *iodev = container_of(this,
+                                                   struct vgic_io_device, dev);
+       u32 intid = (addr - iodev->base_addr) * 8;
+       int i;
+
+       if (iodev->redist_vcpu)
+               vcpu = iodev->redist_vcpu;
+
+       for_each_set_bit(i, val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               spin_lock(&irq->irq_lock);
+
+               irq->enabled = false;
+               /* TODO: Does the exit/entry code take care of "unqueuing"? */
+
+               spin_unlock(&irq->irq_lock);
+       }
+       return 0;
+}
+
 struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 1),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
-               vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1),
+               vgic_mmio_read_enable, vgic_mmio_write_senable, 1),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
-               vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1),
+               vgic_mmio_read_enable, vgic_mmio_write_cenable, 1),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
-- 
2.7.3
