On 12/02/2016 14:59, Suravee Suthikulpanit wrote:
> +static bool avic_check_irr_pending(struct kvm_vcpu *vcpu)
> +{
> +     int i;
> +     u32 irr;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     for (i = 0; i < 8; i++) {
> +             irr = *(avic_get_bk_page_entry(svm,
> +                                     APIC_IRR + (0x10 * i)));
> +             if (irr)
> +                     return true;
> +     }
> +
> +     return false;
> +}
> +
> +static bool svm_avic_check_ppr(struct vcpu_svm *svm)
> +{
> +     u32 tpr = *(avic_get_bk_page_entry(svm, APIC_TASKPRI));
> +     u32 ppr = *(avic_get_bk_page_entry(svm, APIC_PROCPRI));
> +
> +     if (ppr && (ppr != tpr))
> +             return true;
> +
> +     return false;
> +}
> +
> +/* Note: Returning true means do not block. */
> +static bool svm_apicv_intr_pending(struct kvm_vcpu *vcpu)
> +{
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     if (!avic)
> +             return false;
> +
> +     if (atomic_read(&svm->avic_pending_cnt))
> +             return true;
> +
> +     return avic_check_irr_pending(vcpu);
> +}
> +
> +static void avic_post_vmrun(struct kvm_vcpu *vcpu)
> +{
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     if (!avic)
> +             return;
> +
> +     if (atomic_read(&svm->avic_pending_cnt)) {
> +             if (svm_avic_check_ppr(svm))
> +                     return;
> +             if (avic_check_irr_pending(vcpu))
> +                     return;
> +             /*
> +              * At this point, there is no interrupt pending,
> +              * so decrement the pending count.
> +              */
> +             atomic_dec(&svm->avic_pending_cnt);
> +     }
> +}
> +
>  static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  {
>       struct vcpu_svm *svm = to_svm(vcpu);
> @@ -4588,6 +4686,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>       if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
>               kvm_after_handle_nmi(&svm->vcpu);
>  
> +     avic_post_vmrun(vcpu);
> +
>       sync_cr8_to_lapic(vcpu);
>  
>       svm->next_rip = 0;
> @@ -5050,7 +5150,9 @@ static struct kvm_x86_ops svm_x86_ops = {
>  
>       .sched_in = svm_sched_in,
>  
> +     .apicv_intr_pending = svm_apicv_intr_pending,
>       .pmu_ops = &amd_pmu_ops,
> +     .deliver_posted_interrupt = svm_deliver_avic_intr,
>  };
>  
>  static int __init svm_init(void)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 4244c2b..2def290 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -8087,7 +8087,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
>       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
>               kvm_x86_ops->check_nested_events(vcpu, false);
>  
> -     return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
> +     return (kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu) ||
> +             (kvm_x86_ops->apicv_intr_pending &&
> +              kvm_x86_ops->apicv_intr_pending(vcpu)));
>  }

I think this is not necessary.  What you need is to make kvm_lapic's
regs field point to the backing page.  Then when the processor writes to
IRR, kvm_apic_has_interrupt (called through kvm_vcpu_has_events) will
see it.
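
For instance, something along these lines at vCPU setup time (a rough,
untested sketch; avic_backing_page is the field from this series, while
the helper name here is made up):

	static int avic_init_backing_page(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/*
		 * Alias the AVIC backing page to the page that already
		 * backs apic->regs, so hardware writes to IRR land
		 * exactly where kvm_apic_has_interrupt() reads them.
		 */
		svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);

		return 0;
	}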

avic_pending_cnt shouldn't be necessary either: once IRR updates are
visible through apic->regs, there is nothing left for the counter to
track.
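
Concretely, the IRR scan in lapic.c already goes through apic->regs,
roughly (paraphrasing the current code):

	static int apic_search_irr(struct kvm_lapic *apic)
	{
		/* Scan the eight 32-bit APIC_IRR registers in apic->regs. */
		return find_highest_vector(apic->regs + APIC_IRR);
	}

so once regs aliases the backing page, kvm_vcpu_has_events() picks up
hardware-injected interrupts with no extra hook in
kvm_arch_vcpu_runnable and no pending counter.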

Paolo
