On Tue, Jul 20, 2010 at 04:17:07PM +0300, Avi Kivity wrote:
> Change the interrupt injection code to work from preemptible, interrupts
> enabled context. This works by adding a ->cancel_injection() operation
> that undoes an injection in case we were not able to actually enter the guest
> (this condition could never happen with atomic injection).
>
> Signed-off-by: Avi Kivity <[email protected]>
> ---
> arch/x86/include/asm/kvm_host.h | 1 +
> arch/x86/kvm/svm.c | 12 ++++++++++++
> arch/x86/kvm/vmx.c | 11 +++++++++++
> arch/x86/kvm/x86.c | 27 ++++++++++++++-------------
> 4 files changed, 38 insertions(+), 13 deletions(-)
>
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4709,6 +4709,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> if (unlikely(r))
> goto out;
>
> + inject_pending_event(vcpu);
> +
> + /* enable NMI/IRQ window open exits if needed */
> + if (vcpu->arch.nmi_pending)
> + kvm_x86_ops->enable_nmi_window(vcpu);
> + else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> + kvm_x86_ops->enable_irq_window(vcpu);
> +
> + if (kvm_lapic_enabled(vcpu)) {
> + update_cr8_intercept(vcpu);
> + kvm_lapic_sync_to_vapic(vcpu);
> + }
> +
> preempt_disable();
>
> kvm_x86_ops->prepare_guest_switch(vcpu);
> @@ -4727,23 +4740,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> smp_wmb();
> local_irq_enable();
> preempt_enable();
> + kvm_x86_ops->cancel_injection(vcpu);
> r = 1;
> goto out;
> }
>
> - inject_pending_event(vcpu);
> -
> - /* enable NMI/IRQ window open exits if needed */
> - if (vcpu->arch.nmi_pending)
> - kvm_x86_ops->enable_nmi_window(vcpu);
> - else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> - kvm_x86_ops->enable_irq_window(vcpu);
> -
> - if (kvm_lapic_enabled(vcpu)) {
> - update_cr8_intercept(vcpu);
> - kvm_lapic_sync_to_vapic(vcpu);
> - }
> -
> srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>
> kvm_guest_enter();
This breaks the assumption documented in
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
/* This may race with setting of irr in __apic_accept_irq() and
* value returned may be wrong, but kvm_vcpu_kick() in
* __apic_accept_irq
* will cause vmexit immediately and the value will be
* recalculated
* on the next vmentry.
*/
(this also applies to nmi_pending and the PIC). We can't simply move the
atomic_set(guest_mode, 1) into the preemptible section, as that would make
it possible for kvm_vcpu_kick() to send an IPI to a stale vcpu->cpu.
Should the vmx.rmode.* changes also be undone on the cancel path?
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html