Anthony Liguori wrote:
> Marcelo Tosatti wrote:
>> diff --git a/qemu/qemu-kvm-ia64.c b/qemu/qemu-kvm-ia64.c
>> index 4d0ddd7..d227d22 100644
>> --- a/qemu/qemu-kvm-ia64.c
>> +++ b/qemu/qemu-kvm-ia64.c
>> @@ -61,3 +61,7 @@ int kvm_arch_try_push_interrupts(void *opaque)
>>  void kvm_arch_update_regs_for_sipi(CPUState *env)
>>  {
>>  }
>> +
>> +void kvm_arch_cpu_reset(CPUState *env)
>> +{
>> +}
>> diff --git a/qemu/qemu-kvm-powerpc.c b/qemu/qemu-kvm-powerpc.c
>> index 14ed945..024b18c 100644
>> --- a/qemu/qemu-kvm-powerpc.c
>> +++ b/qemu/qemu-kvm-powerpc.c
>> @@ -213,3 +213,7 @@ int handle_powerpc_dcr_write(int vcpu, uint32_t dcrn, uint32_t data)
>>  
>>      return 0; /* XXX ignore failed DCR ops */
>>  }
>> +
>> +void kvm_arch_cpu_reset(CPUState *env)
>> +{
>> +}
>> diff --git a/qemu/qemu-kvm-x86.c b/qemu/qemu-kvm-x86.c
>> index 9a771ff..28eb5c2 100644
>> --- a/qemu/qemu-kvm-x86.c
>> +++ b/qemu/qemu-kvm-x86.c
>> @@ -689,3 +689,19 @@ int handle_tpr_access(void *opaque, int vcpu,
>>      kvm_tpr_access_report(cpu_single_env, rip, is_write);
>>      return 0;
>>  }
>> +
>> +void kvm_arch_cpu_reset(CPUState *env)
>> +{
>> +    struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_UNINITIALIZED };
>> +
>> +    kvm_arch_load_regs(env);
>> +    if (env->cpu_index != 0) {
>> +    if (kvm_irqchip_in_kernel(kvm_context))
>> +        kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
>> +    else {
>> +        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
>> +        env->hflags |= HF_HALTED_MASK;
>> +        env->exception_index = EXCP_HLT;
>> +    }
>> +    }
>> +}
>> diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
>> index 3cc6d8e..1e1f065 100644
>> --- a/qemu/qemu-kvm.c
>> +++ b/qemu/qemu-kvm.c
>> @@ -31,8 +31,6 @@ kvm_context_t kvm_context;
>>  
>>  extern int smp_cpus;
>>  
>> -static int qemu_kvm_reset_requested;
>> -
>>  pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
>>  pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
>>  pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
>> @@ -52,7 +50,6 @@ struct vcpu_info {
>>      int signalled;
>>      int stop;
>>      int stopped;
>> -    int reload_regs;
>>      int created;
>>  } vcpu_info[256];
>>  
>> @@ -242,21 +239,29 @@ static void pause_all_threads(void)
>>  {
>>      int i;
>>  
>> +    if (cpu_single_env) {
>> +        fprintf(stderr, "qemu-kvm: pause_all_threads from vcpu context\n");
>> +        exit(0);
>> +    }
>> +
>>      for (i = 0; i < smp_cpus; ++i) {
>>      vcpu_info[i].stop = 1;
>>      pthread_kill(vcpu_info[i].thread, SIG_IPI);
>>      }
>> -    while (!all_threads_paused()) {
>> -    CPUState *env = cpu_single_env;
>> +    while (!all_threads_paused())
>>      pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
>> -    cpu_single_env = env;
>> -    }
>> +    cpu_single_env = NULL;
>>  }
>>   
> 
> Personally, I prefer it the old way.  All of the open-coded 
> cpu_single_env's are tough to understand and I believe error-prone.  I 
> think a strategy of explicitly preserving cpu_single_env whenever we 
> drop qemu_mutex is more robust.  Explicitly setting cpu_single_env = 
> NULL happens to work because this is only called from the io thread.  
> It's less clear to a casual reader why it's necessary.
> 
> In fact, I'd be much more inclined to see a wrapper around 
> pthread_cond_wait() so that we never explicitly had to set cpu_single_env.

You mean something like this?
(Hunks may have some offsets, the patch is from the middle of my queue.)

Marcelo, I'm fine with your approach. I even switched my
vm_stop-on-breakpoint patch to this pattern, and it seems to work
nicely. Will roll out an updated series later (but probably not today -
weather is too fine here 8)).

---
 qemu/qemu-kvm.c |   32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)

Index: b/qemu/qemu-kvm.c
===================================================================
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -12,6 +12,7 @@ int kvm_allowed = 1;
 int kvm_irqchip = 1;
 int kvm_pit = 1;
 
+#include <assert.h>
 #include <string.h>
 #include "hw/hw.h"
 #include "sysemu.h"
@@ -65,6 +66,14 @@ static inline unsigned long kvm_get_thre
     return syscall(SYS_gettid);
 }
 
+static void qemu_cond_wait(pthread_cond_t *cond)
+{
+    CPUState *env = cpu_single_env;
+
+    pthread_cond_wait(cond, &qemu_mutex);
+    cpu_single_env = env;
+}
+
 CPUState *qemu_kvm_cpu_env(int index)
 {
     return vcpu_info[index].env;
@@ -247,21 +256,22 @@ static void pause_all_threads(void)
 {
     int i;
 
+    assert(!cpu_single_env);
+
     for (i = 0; i < smp_cpus; ++i) {
        vcpu_info[i].stop = 1;
        pthread_kill(vcpu_info[i].thread, SIG_IPI);
     }
-    while (!all_threads_paused()) {
-       CPUState *env = cpu_single_env;
-       pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
-       cpu_single_env = env;
-    }
+    while (!all_threads_paused())
+       qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
     int i;
 
+    assert(!cpu_single_env);
+
     for (i = 0; i < smp_cpus; ++i) {
        vcpu_info[i].stop = 0;
        vcpu_info[i].stopped = 0;
@@ -380,7 +390,7 @@ static void *ap_main_loop(void *_env)
 
     /* and wait for machine initialization */
     while (!qemu_system_ready)
-       pthread_cond_wait(&qemu_system_cond, &qemu_mutex);
+       qemu_cond_wait(&qemu_system_cond);
     pthread_mutex_unlock(&qemu_mutex);
 
     kvm_main_loop_cpu(env);
@@ -392,7 +402,7 @@ void kvm_init_new_ap(int cpu, CPUState *
     pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
 
     while (vcpu_info[cpu].created == 0)
-       pthread_cond_wait(&qemu_vcpu_cond, &qemu_mutex);
+       qemu_cond_wait(&qemu_vcpu_cond);
 }
 
 int kvm_init_ap(void)
@@ -901,8 +911,6 @@ void qemu_kvm_aio_wait_start(void)
 
 void qemu_kvm_aio_wait(void)
 {
-    CPUState *cpu_single = cpu_single_env;
-
     if (!cpu_single_env) {
        if (io_thread_sigfd != -1) {
            fd_set rfds;
@@ -919,10 +927,8 @@ void qemu_kvm_aio_wait(void)
                sigfd_handler((void *)(unsigned long)io_thread_sigfd);
        }
        qemu_aio_poll();
-    } else {
-        pthread_cond_wait(&qemu_aio_cond, &qemu_mutex);
-        cpu_single_env = cpu_single;
-    }
+    } else
+        qemu_cond_wait(&qemu_aio_cond);
 }
 
 void qemu_kvm_aio_wait_end(void)



-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference 
Don't miss this year's exciting event. There's still time to save $100. 
Use priority code J8TL2D2. 
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to