Gregory Haskins wrote:
>  static void kvm_vcpu_irqsink_init(struct kvm_vcpu *vcpu)
> diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
> index a9d917a..c79bfc0 100644
> --- a/drivers/kvm/svm.c
> +++ b/drivers/kvm/svm.c
> @@ -1544,9 +1544,43 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct 
> kvm_run *kvm_run)
>       int r;
>  
>  again:
> +     /*
> +      * We disable interrupts until the next VMEXIT to eliminate a race
> +      * condition for delivery of virtual interrupts.  Note that this is
> +      * probably not as bad as it sounds, as interrupts will still invoke
> +      * a VMEXIT once transitioned to GUEST mode (and thus exit this lock
> +      * scope) even if they are disabled.
> +      */
> +     local_irq_disable();
> +
>       spin_lock(&vcpu->irq.lock);
>  
>       /*
> +      * If there are any signals pending (virtual interrupt related or
> +      * otherwise), don't even bother trying to enter guest mode...
> +      */
> +     if (signal_pending(current)) {
> +             kvm_run->exit_reason = KVM_EXIT_INTR;
> +             spin_unlock(&vcpu->irq.lock);
> +             local_irq_enable();
> +             r = -EINTR;
> +             /*
> +              * FIXME: We probably want to move this whole lock-block below
> +              * the host->guest state loading so we don't restore when
> +              * the system was never saved to begin with
> +              */
> +             goto out;
> +     }
> +
> +     /*
> +      * There are optimizations we can make when signaling interrupts
> +      * if we know the VCPU is in GUEST mode, so record the guest's
> +      * CPU to both serve as an indicator of vcpu state and a target
> +      * for our interrupts
> +      */
> +     vcpu->irq.guest_cpu = task_cpu(current);
> +
> +     /*
>        * We must inject interrupts (if any) while the irq_lock
>        * is held
>        */
> @@ -1688,6 +1722,15 @@ again:
>  #endif
>               : "cc", "memory" );
>  
> +     /*
> +      * Signal that we have transitioned back to host mode
> +      */
> +     spin_lock(&vcpu->irq.lock);
> +     vcpu->irq.guest_cpu = -1;
> +     spin_unlock(&vcpu->irq.lock);
>   

One issue right here. spin_lock() may touch per-cpu data (for 
debugging), but %gs has not been restored yet.  Moving the block after 
host state restore still has problems, though.

> +
> +     local_irq_enable();
> +
>       if (vcpu->fpu_active) {
>               fx_save(vcpu->guest_fx_image);
>               fx_restore(vcpu->host_fx_image);
> @@ -1734,20 +1777,23 @@ again:
>       if (r > 0) {
>               if (signal_pending(current)) {
>                       ++vcpu->stat.signal_exits;
> -                     post_kvm_run_save(vcpu, kvm_run);
>                       kvm_run->exit_reason = KVM_EXIT_INTR;
> -                     return -EINTR;
> +                     r = -EINTR;
> +                     goto out;
>               }
>  
>               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
>                       ++vcpu->stat.request_irq_exits;
>                       post_kvm_run_save(vcpu, kvm_run);
>                       kvm_run->exit_reason = KVM_EXIT_INTR;
> -                     return -EINTR;
> +                     r = -EINTR;
> +                     goto out;
>               }
>               kvm_resched(vcpu);
>               goto again;
>       }
> +
> + out:
>       post_kvm_run_save(vcpu, kvm_run);
>       return r;
>  }
>   

-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to