Hello community,

here is the log from the commit of package qemu for openSUSE:Factory checked in 
at 2012-07-22 15:20:35
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/qemu (Old)
 and      /work/SRC/openSUSE:Factory/.qemu.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "qemu", Maintainer is "[email protected]"

Changes:
--------
--- /work/SRC/openSUSE:Factory/qemu/qemu.changes        2012-06-29 11:34:36.000000000 +0200
+++ /work/SRC/openSUSE:Factory/.qemu.new/qemu.changes   2012-07-22 15:21:31.000000000 +0200
@@ -1,0 +2,25 @@
+Wed Jul 11 21:16:34 UTC 2012 - [email protected]
+
+- fix segfault even better
+
+-------------------------------------------------------------------
+Wed Jul 11 14:52:02 UTC 2012 - [email protected]
+
+- linux-user: improve locking even across tb flushes
+
+-------------------------------------------------------------------
+Tue Jul 10 18:43:31 UTC 2012 - [email protected]
+
+- linux-user: pin multi-threaded applications to a single host cpu
+
+-------------------------------------------------------------------
+Tue Jul 10 16:42:54 UTC 2012 - [email protected]
+
+- linux-user: improve lock
+
+-------------------------------------------------------------------
+Thu Jul  5 15:37:05 UTC 2012 - [email protected]
+
+- linux-user: add tcg lock for multi-threaded guest programs
+
+-------------------------------------------------------------------

New:
----
  0025-linux-user-lock-tcg.patch.patch
  0026-linux-user-Run-multi-threaded-code-.patch
  0027-linux-user-lock-tb-flushing-too.pat.patch
  0028-XXX-merge-with-segmentation-fault-p.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ qemu.spec ++++++
--- /var/tmp/diff_new_pack.U4d1Jo/_old  2012-07-22 15:21:32.000000000 +0200
+++ /var/tmp/diff_new_pack.U4d1Jo/_new  2012-07-22 15:21:32.000000000 +0200
@@ -48,6 +48,10 @@
 Patch0022:      0022-use-libexecdir-instead-of-ignoring-.patch
 Patch0023:      0023-linux-user-Ignore-broken-loop-ioctl.patch
 Patch0024:      0024-linux-user-fix-segmentation-fault-p.patch
+Patch0025:      0025-linux-user-lock-tcg.patch.patch
+Patch0026:      0026-linux-user-Run-multi-threaded-code-.patch
+Patch0027:      0027-linux-user-lock-tb-flushing-too.pat.patch
+Patch0028:      0028-XXX-merge-with-segmentation-fault-p.patch
 # this is to make lint happy
 Source300:      rpmlintrc
 Source302:      bridge.conf
@@ -179,6 +183,10 @@
 %patch0022 -p1
 %patch0023 -p1
 %patch0024 -p1
+%patch0025 -p1
+%patch0026 -p1
+%patch0027 -p1
+%patch0028 -p1
 
 %build
 # build QEMU

++++++ 0025-linux-user-lock-tcg.patch.patch ++++++
From 9a66bfa1a21b7429229be3d52b1d6ea08b141d36 Mon Sep 17 00:00:00 2001
From: Alexander Graf <[email protected]>
Date: Thu, 5 Jul 2012 17:31:39 +0200
Subject: [PATCH] linux-user: lock tcg

The tcg code generator is not thread safe. Take a lock around code
generation so that different threads never generate code concurrently.
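
For readers skimming the diff below: the scheme is a single process-wide
mutex plus a per-thread nesting counter, so a thread that already holds the
lock can take it again without deadlocking against itself. The following is
a minimal standalone sketch of that pattern (not part of the patch); it uses
plain pthreads and hypothetical codegen_lock()/codegen_unlock() names rather
than QEMU's qemu-thread wrappers and the tcg_lock()/tcg_unlock() helpers the
patch adds.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t codegen_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int codegen_lock_count;   /* per-thread nesting depth */

static void codegen_lock(void)
{
    /* Only the outermost call actually takes the mutex. */
    if (codegen_lock_count++ == 0) {
        pthread_mutex_lock(&codegen_mutex);
    }
}

static void codegen_unlock(void)
{
    /* Only the outermost unlock releases the mutex again. */
    if (--codegen_lock_count == 0) {
        pthread_mutex_unlock(&codegen_mutex);
    }
}

int main(void)
{
    codegen_lock();
    codegen_lock();      /* nested call: counter goes to 2, no deadlock */
    codegen_unlock();
    codegen_unlock();    /* counter back to 0: mutex released here */
    printf("nested lock/unlock completed\n");
    return 0;
}

Build with gcc -pthread; only the outermost lock/unlock pair touches the
mutex, which is exactly how tcg_lock()/tcg_unlock() behave in the hunks below.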

Signed-off-by: Alexander Graf <[email protected]>
---
 linux-user/mmap.c |    3 +++
 tcg/tcg.c         |   36 ++++++++++++++++++++++++++++++++++--
 tcg/tcg.h         |    6 ++++++
 3 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 3f5e1d7..83e9eda 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -30,6 +30,7 @@
 
 #include "qemu.h"
 #include "qemu-common.h"
+#include "tcg.h"
 
 //#define DEBUG_MMAP
 
@@ -41,6 +42,7 @@ void mmap_lock(void)
 {
     if (mmap_lock_count++ == 0) {
         pthread_mutex_lock(&mmap_mutex);
+        tcg_lock();
     }
 }
 
@@ -48,6 +50,7 @@ void mmap_unlock(void)
 {
     if (--mmap_lock_count == 0) {
         pthread_mutex_unlock(&mmap_mutex);
+        tcg_unlock();
     }
 }
 
diff --git a/tcg/tcg.c b/tcg/tcg.c
index ab589c7..4c93acb 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -40,6 +40,8 @@
 #include "cache-utils.h"
 #include "host-utils.h"
 #include "qemu-timer.h"
+#include "config-host.h"
+#include "qemu-thread.h"
 
 /* Note: the long term plan is to reduce the dependancies on the QEMU
    CPU definitions. Currently they are used for qemu_ld/st
@@ -105,6 +107,29 @@ static TCGRegSet tcg_target_call_clobber_regs;
 uint16_t *gen_opc_ptr;
 TCGArg *gen_opparam_ptr;
 
+#ifdef CONFIG_USER_ONLY
+static __thread int tcg_lock_count;
+#endif
+void tcg_lock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    TCGContext *s = &tcg_ctx;
+    if (tcg_lock_count++ == 0) {
+        qemu_mutex_lock(&s->lock);
+    }
+#endif
+}
+
+void tcg_unlock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    TCGContext *s = &tcg_ctx;
+    if (--tcg_lock_count == 0) {
+        qemu_mutex_unlock(&s->lock);
+    }
+#endif
+}
+
 static inline void tcg_out8(TCGContext *s, uint8_t v)
 {
     *s->code_ptr++ = v;
@@ -245,7 +270,8 @@ void tcg_context_init(TCGContext *s)
     memset(s, 0, sizeof(*s));
     s->temps = s->static_temps;
     s->nb_globals = 0;
-    
+    qemu_mutex_init(&s->lock);
+
     /* Count total number of arguments and allocate the corresponding
        space */
     total_args = 0;
@@ -2182,11 +2208,13 @@ int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
     }
 #endif
 
+    tcg_lock();
     tcg_gen_code_common(s, gen_code_buf, -1);
 
     /* flush instruction cache */
     flush_icache_range((tcg_target_ulong)gen_code_buf,
                        (tcg_target_ulong)s->code_ptr);
+    tcg_unlock();
 
     return s->code_ptr -  gen_code_buf;
 }
@@ -2197,7 +2225,11 @@ int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
    Return -1 if not found. */
 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
 {
-    return tcg_gen_code_common(s, gen_code_buf, offset);
+    int r;
+    tcg_lock();
+    r = tcg_gen_code_common(s, gen_code_buf, offset);
+    tcg_unlock();
+    return r;
 }
 
 #ifdef CONFIG_PROFILER
diff --git a/tcg/tcg.h b/tcg/tcg.h
index a83bddd..e20fc82 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -46,6 +46,8 @@ typedef uint64_t tcg_target_ulong;
 #error unsupported
 #endif
 
+#include "config-host.h"
+#include "qemu-thread.h"
 #include "tcg-target.h"
 #include "tcg-runtime.h"
 
@@ -389,6 +391,7 @@ struct TCGContext {
 #ifdef CONFIG_DEBUG_TCG
     int temps_in_use;
 #endif
+    QemuMutex lock;
 };
 
 extern TCGContext tcg_ctx;
@@ -568,6 +571,9 @@ void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
 TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args,
                      TCGOpDef *tcg_op_def);
 
+extern void tcg_lock(void);
+extern void tcg_unlock(void);
+
 /* only used for debugging purposes */
 void tcg_register_helper(void *func, const char *name);
 const char *tcg_helper_get_name(TCGContext *s, void *func);
++++++ 0026-linux-user-Run-multi-threaded-code-.patch ++++++
From f85c851986d24df8700ffe447301786fe83819d0 Mon Sep 17 00:00:00 2001
From: Alexander Graf <[email protected]>
Date: Tue, 10 Jul 2012 20:40:55 +0200
Subject: [PATCH] linux-user: Run multi-threaded code on a single core

Running multi-threaded code can easily expose some of the fundamental
breakages in QEMU's design; it is simply not a well-supported scenario.

So if we pin the whole process to a single host CPU, we ensure that guest
memory accesses never actually happen concurrently. We can still be scheduled
away at any time, so this is not a complete guarantee, but it apparently
reduces the odds well enough to get my test cases to pass.

This gets Java 1.7 working for me again on my test box.
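
As a quick illustration of the mechanism, here is a minimal standalone
program (a sketch for illustration only, not QEMU code) that pins the calling
process to host CPU 0 using the same sched_setaffinity() call the hunk below
adds to do_fork():

#define _GNU_SOURCE         /* for CPU_ZERO/CPU_SET and sched_setaffinity() */
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t mask;

    CPU_ZERO(&mask);
    CPU_SET(0, &mask);                        /* allow only host CPU 0 */
    if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
        perror("sched_setaffinity");
        return 1;
    }
    printf("pinned to CPU 0\n");
    return 0;
}

Threads created afterwards inherit the caller's affinity mask, which is why a
single call in the thread-creation path is enough to keep the guest threads
on one core.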

Signed-off-by: Alexander Graf <[email protected]>
---
 linux-user/syscall.c |    9 +++++++++
 1 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 46e964b..b0566cd 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -4298,6 +4298,15 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         if (nptl_flags & CLONE_SETTLS)
             cpu_set_tls (new_env, newtls);
 
+        /* agraf: Pin ourselves to a single CPU when running multi-threaded.
+           This turned out to improve stability for me. */
+        {
+            cpu_set_t mask;
+            CPU_ZERO(&mask);
+            CPU_SET(0, &mask);
+            sched_setaffinity(0, sizeof(mask), &mask);
+        }
+
         /* Grab a mutex so that thread setup appears atomic.  */
         pthread_mutex_lock(&clone_lock);
 
++++++ 0027-linux-user-lock-tb-flushing-too.pat.patch ++++++
From 777d50057b576a0d829481a0cea9cd399e7a5f65 Mon Sep 17 00:00:00 2001
From: Alexander Graf <[email protected]>
Date: Wed, 11 Jul 2012 16:47:42 +0200
Subject: [PATCH] linux-user: lock tb flushing too

Signed-off-by: Alexander Graf <[email protected]>
---
 exec.c |   33 ++++++++++++++++++++++++++-------
 1 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/exec.c b/exec.c
index 9ba4409..6da4b38 100644
--- a/exec.c
+++ b/exec.c
@@ -732,17 +732,22 @@ static TranslationBlock *tb_alloc(target_ulong pc)
 {
     TranslationBlock *tb;
 
+    tcg_lock();
     if (nb_tbs >= code_gen_max_blocks ||
-        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
+        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
+        tcg_unlock();
         return NULL;
+    }
     tb = &tbs[nb_tbs++];
     tb->pc = pc;
     tb->cflags = 0;
+    tcg_unlock();
     return tb;
 }
 
 void tb_free(TranslationBlock *tb)
 {
+    tcg_lock();
     /* In practice this is mostly used for single use temporary TB
        Ignore the hard cases and just back up if this TB happens to
        be the last one generated.  */
@@ -750,6 +755,7 @@ void tb_free(TranslationBlock *tb)
         code_gen_ptr = tb->tc_ptr;
         nb_tbs--;
     }
+    tcg_unlock();
 }
 
 static inline void invalidate_page_bitmap(PageDesc *p)
@@ -803,6 +809,7 @@ void tb_flush(CPUArchState *env1)
            nb_tbs, nb_tbs > 0 ?
            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
 #endif
+    tcg_lock();
     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
         cpu_abort(env1, "Internal error: code buffer overflow\n");
 
@@ -819,6 +826,7 @@ void tb_flush(CPUArchState *env1)
     /* XXX: flush processor icache at this point if cache flush is
        expensive */
     tb_flush_count++;
+    tcg_unlock();
 }
 
 #ifdef DEBUG_TB_CHECK
@@ -1116,9 +1124,12 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     int current_flags = 0;
 #endif /* TARGET_HAS_PRECISE_SMC */
 
+    tcg_lock();
     p = page_find(start >> TARGET_PAGE_BITS);
-    if (!p)
+    if (!p) {
+        tcg_unlock();
         return;
+    }
     if (!p->code_bitmap &&
         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
         is_cpu_write_access) {
@@ -1202,6 +1213,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
         cpu_resume_from_signal(env, NULL);
     }
 #endif
+    tcg_unlock();
 }
 
 /* len must be <= 8 and start must be a multiple of len */
@@ -1397,12 +1409,16 @@ TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
 {
     int m_min, m_max, m;
     uintptr_t v;
-    TranslationBlock *tb;
+    TranslationBlock *tb, *r;
 
-    if (nb_tbs <= 0)
+    tcg_lock();
+    if (nb_tbs <= 0) {
+        tcg_unlock();
         return NULL;
+    }
     if (tc_ptr < (uintptr_t)code_gen_buffer ||
         tc_ptr >= (uintptr_t)code_gen_ptr) {
+        tcg_unlock();
         return NULL;
     }
     /* binary search (cf Knuth) */
@@ -1412,15 +1428,18 @@ TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
         m = (m_min + m_max) >> 1;
         tb = &tbs[m];
         v = (uintptr_t)tb->tc_ptr;
-        if (v == tc_ptr)
+        if (v == tc_ptr) {
+            tcg_unlock();
             return tb;
-        else if (tc_ptr < v) {
+        } else if (tc_ptr < v) {
             m_max = m - 1;
         } else {
             m_min = m + 1;
         }
     }
-    return &tbs[m_max];
+    r = &tbs[m_max];
+    tcg_unlock();
+    return r;
 }
 
 static void tb_reset_jump_recursive(TranslationBlock *tb);
++++++ 0028-XXX-merge-with-segmentation-fault-p.patch ++++++
From a81c9410cc3538dfeb22484d1daa95e6a5bb1e79 Mon Sep 17 00:00:00 2001
From: Alexander Graf <[email protected]>
Date: Wed, 11 Jul 2012 23:15:47 +0200
Subject: [PATCH] XXX merge with segmentation fault passing patch

---
 user-exec.c |   32 +++++++++++++++++++-------------
 1 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/user-exec.c b/user-exec.c
index 83d2d44..cc57bde 100644
--- a/user-exec.c
+++ b/user-exec.c
@@ -100,21 +100,27 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
     /* Maybe we're still holding the TB fiddling lock? */
     spin_unlock_safe(&tb_lock);
 
-    if (h2g_valid(address)) {
-        /* XXX: locking issue */
-        if (is_write && page_unprotect(h2g(address), pc, puc)) {
-            return 1;
-        }
+    /* XXX: locking issue */
+    if (h2g_valid(address) && is_write &&
+        page_unprotect(h2g(address), pc, puc)) {
+        return 1;
+    }
 
-        /* see if it is an MMU fault */
-        ret = cpu_handle_mmu_fault(env, h2g(address), is_write, MMU_USER_IDX);
-        if (ret < 0) {
-            return 0; /* not an MMU fault */
-        }
-        if (ret == 0) {
-            return 1; /* the MMU fault was handled without causing real CPU fault */
-        }
+    if (RESERVED_VA) {
+        /* Convert forcefully to guest address space, invalid addresses
+           are still valid segv ones */
+        address = address - GUEST_BASE;
     }
+
+    /* see if it is an MMU fault */
+    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX);
+    if (ret < 0) {
+        return 0; /* not an MMU fault */
+    }
+    if (ret == 0) {
+        return 1; /* the MMU fault was handled without causing real CPU fault */
+    }
+
     /* now we have a real cpu fault */
     tb = tb_find_pc(pc);
     if (tb) {
-- 