Avi,
There was a discussion regarding PIT timer migration some time ago; you
said:
"I'm not sure hrtimer migration would work 100% reliably (suppose it
fired just after a vcpu migration) so I think a queue_work is better."
I fail to see any problem with a timer firing either before or after the
request bit is set in the vcpu's request mask during vcpu_load().
Whatever the case, nothing will stop __vcpu_run() from migrating the
timer to vcpu0 (in PIT's case), and any instances that fired on a
different CPU between vcpu_load() and hrtimer_cancel/start will be
injected in kvm_inject_pending_timer_irqs(). Is there anything
you can think of?
-------
Migrate the PIT timer to the physical CPU which vcpu0 is scheduled on,
similarly to what is done for the LAPIC timers, otherwise PIT interrupts
will be delayed until an unrelated event causes an exit.
Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>
Index: kvm.realtip/arch/x86/kvm/i8254.c
===================================================================
--- kvm.realtip.orig/arch/x86/kvm/i8254.c
+++ kvm.realtip/arch/x86/kvm/i8254.c
@@ -200,7 +200,6 @@ static int __pit_timer_fn(struct kvm_kpi
atomic_inc(&pt->pending);
smp_mb__after_atomic_inc();
- /* FIXME: handle case where the guest is in guest mode */
if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
wake_up_interruptible(&vcpu0->wq);
@@ -237,6 +236,20 @@ static enum hrtimer_restart pit_timer_fn
return HRTIMER_NORESTART;
}
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+ struct hrtimer *timer;
+
+ if (!pit)
+ return;
+
+ timer = &pit->pit_state.pit_timer.timer;
+ if (hrtimer_cancel(timer))
+ hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+
+
static void destroy_pit_timer(struct kvm_kpit_timer *pt)
{
pr_debug("pit: execute del timer!\n");
Index: kvm.realtip/arch/x86/kvm/irq.h
===================================================================
--- kvm.realtip.orig/arch/x86/kvm/irq.h
+++ kvm.realtip/arch/x86/kvm/irq.h
@@ -84,6 +84,7 @@ void kvm_timer_intr_post(struct kvm_vcpu
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
int pit_has_pending_timer(struct kvm_vcpu *vcpu);
int apic_has_pending_timer(struct kvm_vcpu *vcpu);
Index: kvm.realtip/arch/x86/kvm/svm.c
===================================================================
--- kvm.realtip.orig/arch/x86/kvm/svm.c
+++ kvm.realtip/arch/x86/kvm/svm.c
@@ -690,7 +690,7 @@ static void svm_vcpu_load(struct kvm_vcp
delta = vcpu->arch.host_tsc - tsc_this;
svm->vmcb->control.tsc_offset += delta;
vcpu->cpu = cpu;
- kvm_migrate_apic_timer(vcpu);
+ kvm_migrate_timers(vcpu);
}
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Index: kvm.realtip/arch/x86/kvm/vmx.c
===================================================================
--- kvm.realtip.orig/arch/x86/kvm/vmx.c
+++ kvm.realtip/arch/x86/kvm/vmx.c
@@ -616,7 +616,7 @@ static void vmx_vcpu_load(struct kvm_vcp
if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
- kvm_migrate_apic_timer(vcpu);
+ kvm_migrate_timers(vcpu);
vpid_sync_vcpu_all(vmx);
local_irq_disable();
list_add(&vmx->local_vcpus_link,
Index: kvm.realtip/arch/x86/kvm/x86.c
===================================================================
--- kvm.realtip.orig/arch/x86/kvm/x86.c
+++ kvm.realtip/arch/x86/kvm/x86.c
@@ -2744,8 +2744,10 @@ again:
goto out;
if (vcpu->requests) {
- if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
+ if (test_and_clear_bit(KVM_REQ_MIGRATE_APIC, &vcpu->requests))
__kvm_migrate_apic_timer(vcpu);
+ if (test_and_clear_bit(KVM_REQ_MIGRATE_PIT, &vcpu->requests))
+ __kvm_migrate_pit_timer(vcpu);
if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
&vcpu->requests)) {
kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
Index: kvm.realtip/include/linux/kvm_host.h
===================================================================
--- kvm.realtip.orig/include/linux/kvm_host.h
+++ kvm.realtip/include/linux/kvm_host.h
@@ -29,10 +29,11 @@
* vcpu->requests bit members
*/
#define KVM_REQ_TLB_FLUSH 0
-#define KVM_REQ_MIGRATE_TIMER 1
+#define KVM_REQ_MIGRATE_APIC 1
#define KVM_REQ_REPORT_TPR_ACCESS 2
#define KVM_REQ_MMU_RELOAD 3
#define KVM_REQ_TRIPLE_FAULT 4
+#define KVM_REQ_MIGRATE_PIT 5
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
@@ -294,9 +295,11 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn
return (gpa_t)gfn << PAGE_SHIFT;
}
-static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
- set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
+ set_bit(KVM_REQ_MIGRATE_APIC, &vcpu->requests);
+ if (vcpu->vcpu_id == 0)
+ set_bit(KVM_REQ_MIGRATE_PIT, &vcpu->requests);
}
enum kvm_stat_kind {
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html