Add a new flag to vgic_init_pending_irq() so that we can record whether a virtual interrupt is tied to a hardware one. The ITS code sets the flag when mapping an LPI on GICv4 systems and the cleanup path skips removing such IRQs from the virtual queues to avoid interfering with hardware-backed injections.
Signed-off-by: Mykyta Poturai <[email protected]>
---
 xen/arch/arm/include/asm/vgic.h | 15 ++++++++++++++-
 xen/arch/arm/vgic-v3-its.c      |  4 ++--
 xen/arch/arm/vgic.c             | 10 ++++++----
 3 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/include/asm/vgic.h b/xen/arch/arm/include/asm/vgic.h
index a874a02d70..77323b2584 100644
--- a/xen/arch/arm/include/asm/vgic.h
+++ b/xen/arch/arm/include/asm/vgic.h
@@ -94,11 +94,23 @@ struct pending_irq
      * TODO: when implementing irq migration, taking only the current
      * vgic lock is not going to be enough.
      */
     struct list_head lr_queue;
+    bool hw;                    /* Tied to HW IRQ */
 };
 
 #define NR_INTERRUPT_PER_RANK   32
 #define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1)
+#ifdef CONFIG_GICV4
+static inline bool pirq_is_tied_to_hw(struct pending_irq *pirq)
+{
+    ASSERT(pirq);
+    return pirq->hw;
+}
+
+#else
+#define pirq_is_tied_to_hw(pirq) ((void)pirq, false)
+#endif
+
 /* Represents state corresponding to a block of 32 interrupts */
 struct vgic_irq_rank {
     spinlock_t lock; /* Covers access to all other members of this struct */
@@ -360,7 +372,8 @@ static inline paddr_t vgic_dist_base(const struct vgic_dist *vgic)
 extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
 extern void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p);
 extern void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p);
-extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
+extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq,
+                                  bool hw);
 extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
 extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
 extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v,
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index bc738614bb..576e7fd4b0 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -610,7 +610,7 @@ static int its_discard_event(struct virt_its *its,
 
     /* Cleanup the pending_irq and disconnect it from the LPI. */
     vgic_remove_irq_from_queues(vcpu, p);
-    vgic_init_pending_irq(p, INVALID_LPI);
+    vgic_init_pending_irq(p, INVALID_LPI, false);
 
     spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
 
@@ -749,7 +749,7 @@ static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
     if ( !pirq )
         goto out_remove_mapping;
 
-    vgic_init_pending_irq(pirq, intid);
+    vgic_init_pending_irq(pirq, intid, gic_is_gicv4());
 
     /*
      * Now read the guest's property table to initialize our cached state.
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 6647071ad4..0da8c1a425 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -112,7 +112,7 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
     return vgic_get_rank(v, rank);
 }
 
-void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
+void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq, bool hw)
 {
     /* The lpi_vcpu_id field must be big enough to hold a VCPU ID. */
     BUILD_BUG_ON(BIT(sizeof(p->lpi_vcpu_id) * 8, UL) < MAX_VIRT_CPUS);
@@ -122,6 +122,8 @@ void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
     INIT_LIST_HEAD(&p->lr_queue);
     p->irq = virq;
     p->lpi_vcpu_id = INVALID_VCPU_ID;
+    /* Whether virtual irq is tied to a HW one. */
+    p->hw = hw;
 }
 
 static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
@@ -202,7 +204,7 @@ static int init_vgic_espi(struct domain *d)
     for ( i = d->arch.vgic.nr_spis, idx = 0; i < vgic_num_spi_lines(d);
           i++, idx++ )
         vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i],
-                              espi_idx_to_intid(idx));
+                              espi_idx_to_intid(idx), false);
 
     for ( i = 0; i < DOMAIN_NR_EXT_RANKS(d); i++ )
         vgic_rank_init(&d->arch.vgic.ext_shared_irqs[i],
@@ -304,7 +306,7 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
         return -ENOMEM;
 
     for (i=0; i<d->arch.vgic.nr_spis; i++)
-        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);
+        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32, false);
 
     /* SPIs are routed to VCPU0 by default */
     for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
@@ -381,7 +383,7 @@ int vcpu_vgic_init(struct vcpu *v)
     memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
 
     for (i = 0; i < 32; i++)
-        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i);
+        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i, false);
 
     INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs);
     INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
-- 
2.51.2
