If an async page fault is received by the idle task, or when preempt_count is
not zero, the guest cannot reschedule, so do "sti; hlt" and wait for the page
to become ready. The vcpu can still process interrupts while it waits for the
page to become ready.

Signed-off-by: Gleb Natapov <[email protected]>
---
 arch/x86/kernel/kvm.c |   31 +++++++++++++++++++++++++++----
 1 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 09444c9..0836d9a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -63,6 +63,7 @@ struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
+       int cpu;
 };
 
 static struct kvm_task_sleep_head {
@@ -91,6 +92,11 @@ static void apf_task_wait(struct task_struct *tsk, u32 token)
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
+       int cpu, idle;
+
+       cpu = get_cpu();
+       idle = idle_cpu(cpu);
+       put_cpu();
 
        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
@@ -105,15 +111,30 @@ static void apf_task_wait(struct task_struct *tsk, u32 
token)
        n.token = token;
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
+       if (idle || preempt_count() > 1)
+               n.cpu = smp_processor_id();
+       else
+               n.cpu = -1;
        spin_unlock(&b->lock);
 
        for (;;) {
-               prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+               if (n.cpu < 0)
+                       prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;
-               schedule();
+
+               if (n.cpu < 0) {
+                       schedule();
+               } else {
+                       /*
+                        * We cannot reschedule. So halt.
+                        */
+                       native_safe_halt();
+                       local_irq_disable();
+               }
        }
-       finish_wait(&n.wq, &wait);
+       if (n.cpu < 0)
+               finish_wait(&n.wq, &wait);
 
        return;
 }
@@ -146,7 +167,9 @@ again:
                hlist_add_head(&n->link, &b->list);
        } else {
                hlist_del_init(&n->link);
-               if (waitqueue_active(&n->wq))
+               if (n->cpu >= 0)
+                       smp_send_reschedule(n->cpu);
+               else if (waitqueue_active(&n->wq))
                        wake_up(&n->wq);
        }
        spin_unlock(&b->lock);
-- 
1.6.5

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to