On Fri, Mar 22, 2013 at 01:24:05PM +0800, Yang Zhang wrote:
> From: Yang Zhang <[email protected]>
>
> The current interrupt coalescing logic, which is only used by the RTC,
> conflicts with Posted Interrupt.
> This patch introduces a new mechanism that uses EOI to track interrupts:
> when delivering an interrupt to a vcpu, pending_eoi is set to the number
> of vcpus that received the interrupt, and it is decremented as each vcpu
> writes EOI. No subsequent RTC interrupt can be delivered to a vcpu until
> all vcpus have written EOI.
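To spell out the lifecycle described above in one place (a condensed sketch
pieced together from the hunks below, not literal patch code):

        /* in ioapic_deliver(): remember how many vcpus got the RTC irq */
        ioapic->rtc_status.pending_eoi =
                kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                         ioapic->rtc_status.dest_map);

        /* in rtc_irq_ack_eoi(): each vcpu's EOI write drops the count */
        if (test_and_clear_bit(vcpu->vcpu_id, rtc_status->dest_map))
                --rtc_status->pending_eoi;

        /* in kvm_ioapic_set_irq(): a new RTC irq is reported as
         * coalesced (ret = 0) while EOIs are still outstanding */
        if (ioapic->rtc_status.pending_eoi > 0)
                return 0;       /* coalesced */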
>
> Signed-off-by: Yang Zhang <[email protected]>
> ---
> virt/kvm/ioapic.c | 40 +++++++++++++++++++++++++++++++++++++++-
> 1 files changed, 39 insertions(+), 1 deletions(-)
>
> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
> index c991e58..df16daf 100644
> --- a/virt/kvm/ioapic.c
> +++ b/virt/kvm/ioapic.c
> @@ -114,6 +114,29 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
> ioapic->rtc_status.pending_eoi = pending_eoi;
> }
>
> +static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
> + struct rtc_status *rtc_status, int irq)
> +{
> + if (irq != RTC_GSI)
> + return;
> +
> + if (test_and_clear_bit(vcpu->vcpu_id, rtc_status->dest_map))
> + --rtc_status->pending_eoi;
> +
> + WARN_ON(rtc_status->pending_eoi < 0);
> +}
> +
> +static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
> +{
> + if (irq != RTC_GSI)
> + return false;
> +
> + if (ioapic->rtc_status.pending_eoi > 0)
> + return true; /* coalesced */
> +
> + return false;
> +}
> +
> static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
> {
> union kvm_ioapic_redirect_entry *pent;
> @@ -229,6 +252,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
> {
> union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
> struct kvm_lapic_irq irqe;
> + int ret;
>
> ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
> "vector=%x trig_mode=%x\n",
> @@ -244,7 +268,14 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
> irqe.level = 1;
> irqe.shorthand = 0;
>
> - return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
> + if (irq == RTC_GSI) {
> + ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
> + ioapic->rtc_status.dest_map);
> + ioapic->rtc_status.pending_eoi = ret;
We should only track the status if the IRQ_STATUS ioctl was used to inject
the interrupt.
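I.e. something along these lines in ioapic_deliver() (a sketch only; the
line_status flag is hypothetical here and would have to be threaded down
from the KVM_IRQ_LINE_STATUS ioctl path into this function):

        if (irq == RTC_GSI && line_status) {
                /* only track EOIs for interrupts injected through the
                 * status-reporting ioctl */
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                               ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi = ret;
        } else
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);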
> + } else
> + ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
> +
> + return ret;
> }
>
> int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
> @@ -268,6 +299,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
> ret = 1;
> } else {
> int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
> +
> + if (rtc_irq_check(ioapic, irq)) {
> + ret = 0; /* coalesced */
> + goto out;
> + }
> ioapic->irr |= mask;
> if ((edge && old_irr != ioapic->irr) ||
> (!edge && !entry.fields.remote_irr))
> @@ -275,6 +311,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
> else
> ret = 0; /* report coalesced interrupt */
> }
> +out:
> trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
> spin_unlock(&ioapic->lock);
>
> @@ -302,6 +339,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
> if (ent->fields.vector != vector)
> continue;
>
> + rtc_irq_ack_eoi(vcpu, &ioapic->rtc_status, i);
> /*
> * We are dropping lock while calling ack notifiers because ack
> * notifier callbacks for assigned devices call into IOAPIC
> --
> 1.7.1
--
Gleb.