Author: Armin Rigo <[email protected]>
Branch: stmgc-c7
Changeset: r70085:22b83e351878
Date: 2014-03-19 09:22 +0100
http://bitbucket.org/pypy/pypy/changeset/22b83e351878/

Log:    import stmgc/75893b92af4e (c7-fork)

diff --git a/rpython/translator/stm/src_stm/revision 
b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-885ed3b0f6ee
+75893b92af4e
diff --git a/rpython/translator/stm/src_stm/stm/core.c 
b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -158,7 +158,7 @@
              MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) {
         /* fall-back */
 #if STM_TESTS
-        stm_fatalerror("reset_transaction_read_version: %m\n");
+        stm_fatalerror("reset_transaction_read_version: %m");
 #endif
         memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL);
     }
@@ -273,7 +273,7 @@
     assert(_has_mutex_pages());
     assert(!_is_young(obj));
 
-    char *segment_base = get_segment(source_segment_num)->segment_base;
+    char *segment_base = get_segment_base(source_segment_num);
     uintptr_t start = (uintptr_t)obj;
     uintptr_t first_page = start / 4096UL;
     struct object_s *realobj = (struct object_s *)
@@ -519,19 +519,17 @@
 static void
 reset_modified_from_other_segments(int segment_num)
 {
-    /* pull the right versions from other threads in order
+    /* pull the right versions from segment 0 in order
        to reset our pages as part of an abort.
 
        Note that this function is also sometimes called from
        contention.c to clean up the state of a different thread,
        when we would really like it to be aborted now and it is
        suspended at a safe-point.
-
     */
     struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num);
-    long remote_num = !segment_num;
     char *local_base = get_segment_base(segment_num);
-    char *remote_base = get_segment_base(remote_num);
+    char *remote_base = get_segment_base(0);
 
     LIST_FOREACH_R(
         pseg->modified_old_objects,
diff --git a/rpython/translator/stm/src_stm/stm/fprintcolor.c 
b/rpython/translator/stm/src_stm/stm/fprintcolor.c
--- a/rpython/translator/stm/src_stm/stm/fprintcolor.c
+++ b/rpython/translator/stm/src_stm/stm/fprintcolor.c
@@ -9,8 +9,8 @@
     char buffer[2048];
     va_list ap;
     int result;
-    int size = (int)sprintf(buffer, "\033[%dm[%lx] ", dprintfcolor(),
-                            (long)pthread_self());
+    int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(),
+                            (int)getpid(), (long)pthread_self());
     assert(size >= 0);
 
     va_start(ap, format);
@@ -42,6 +42,7 @@
 
     va_start(ap, format);
     vfprintf(stderr, format, ap);
+    fprintf(stderr, "\n");
     va_end(ap);
 
     abort();
diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c 
b/rpython/translator/stm/src_stm/stm/gcpage.c
--- a/rpython/translator/stm/src_stm/stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/stm/gcpage.c
@@ -61,7 +61,7 @@
     return;
 
  out_of_memory:
-    stm_fatalerror("out of memory!\n");   /* XXX */
+    stm_fatalerror("out of memory!");   /* XXX */
 }
 
 static char *_allocate_small_slowpath(uint64_t size)
@@ -86,7 +86,7 @@
     /* Allocate the object with largemalloc.c from the lower addresses. */
     char *addr = _stm_large_malloc(size);
     if (addr == NULL)
-        stm_fatalerror("not enough memory!\n");
+        stm_fatalerror("not enough memory!");
 
     if (addr + size > uninitialized_page_start) {
         uintptr_t npages;
@@ -94,7 +94,7 @@
         npages += GCPAGE_NUM_PAGES;
         if (uninitialized_page_stop - uninitialized_page_start <
                 npages * 4096UL) {
-            stm_fatalerror("out of memory!\n");   /* XXX */
+            stm_fatalerror("out of memory!");   /* XXX */
         }
         setup_N_pages(uninitialized_page_start, npages);
         uninitialized_page_start += npages * 4096UL;
diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c 
b/rpython/translator/stm/src_stm/stm/largemalloc.c
--- a/rpython/translator/stm/src_stm/stm/largemalloc.c
+++ b/rpython/translator/stm/src_stm/stm/largemalloc.c
@@ -274,8 +274,10 @@
         /* unlink the following chunk */
         mscan->d.next->prev = mscan->d.prev;
         mscan->d.prev->next = mscan->d.next;
-        assert((mscan->prev_size = (size_t)-258, 1));  /* 0xfffffffffffffefe */
-        assert((mscan->size = (size_t)-515, 1));       /* 0xfffffffffffffdfd */
+#ifndef NDEBUG
+        mscan->prev_size = (size_t)-258;  /* 0xfffffffffffffefe */
+        mscan->size = (size_t)-515;       /* 0xfffffffffffffdfd */
+#endif
 
         /* merge the two chunks */
         assert(fsize == fscan->prev_size);
diff --git a/rpython/translator/stm/src_stm/stm/list.c 
b/rpython/translator/stm/src_stm/stm/list.c
--- a/rpython/translator/stm/src_stm/stm/list.c
+++ b/rpython/translator/stm/src_stm/stm/list.c
@@ -13,7 +13,7 @@
     uintptr_t initial_allocation = 32;
     struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation));
     if (lst == NULL)
-        stm_fatalerror("out of memory in list_create\n");   /* XXX */
+        stm_fatalerror("out of memory in list_create");   /* XXX */
 
     lst->count = 0;
     lst->last_allocated = initial_allocation - 1;
@@ -25,7 +25,7 @@
     nalloc = LIST_OVERCNT(nalloc);
     lst = realloc(lst, LIST_SETSIZE(nalloc));
     if (lst == NULL)
-        stm_fatalerror("out of memory in _list_grow\n");   /* XXX */
+        stm_fatalerror("out of memory in _list_grow");   /* XXX */
 
     lst->last_allocated = nalloc - 1;
     return lst;
@@ -94,7 +94,7 @@
     //fprintf(stderr, "growth: %ld\n", newalloc);
     char *newitems = malloc(newalloc);
     if (newitems == NULL) {
-        stm_fatalerror("out of memory!\n");   /* XXX */
+        stm_fatalerror("out of memory!");   /* XXX */
     }
     newtree.raw_start = newitems;
     newtree.raw_current = newitems;
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c 
b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -33,10 +33,6 @@
     }
 }
 
-static void teardown_nursery(void)
-{
-}
-
 static inline bool _is_in_nursery(object_t *obj)
 {
     assert((uintptr_t)obj >= NURSERY_START);
diff --git a/rpython/translator/stm/src_stm/stm/pages.c 
b/rpython/translator/stm/src_stm/stm/pages.c
--- a/rpython/translator/stm/src_stm/stm/pages.c
+++ b/rpython/translator/stm/src_stm/stm/pages.c
@@ -94,7 +94,7 @@
 
     int res = remap_file_pages(addr, size, 0, pgoff, 0);
     if (UNLIKELY(res < 0))
-        stm_fatalerror("remap_file_pages: %m\n");
+        stm_fatalerror("remap_file_pages: %m");
 }
 
 static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count)
@@ -104,6 +104,8 @@
        segment 0. */
     uintptr_t i;
     assert(_has_mutex_pages());
+    if (count == 0)
+        return;
     for (i = 1; i <= NB_SEGMENTS; i++) {
         char *segment_base = get_segment_base(i);
         d_remap_file_pages(segment_base + pagenum * 4096UL,
@@ -141,6 +143,13 @@
     mutex_pages_unlock();
 }
 
+static void _page_do_reshare(long segnum, uintptr_t pagenum)
+{
+    char *segment_base = get_segment_base(segnum);
+    d_remap_file_pages(segment_base + pagenum * 4096UL,
+                       4096, pagenum);
+}
+
 static void page_reshare(uintptr_t pagenum)
 {
     struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START];
@@ -150,7 +159,7 @@
     for (j = 0; j < NB_SEGMENTS; j++) {
         if (ps.by_segment & (1 << j)) {
             /* Page 'pagenum' is private in segment 'j + 1'. Reshare */
-            char *segment_base = stm_object_pages + NB_PAGES * 4096UL * (j+1);
+            char *segment_base = get_segment_base(j + 1);
 
             madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED);
             d_remap_file_pages(segment_base + pagenum * 4096UL,
diff --git a/rpython/translator/stm/src_stm/stm/pages.h 
b/rpython/translator/stm/src_stm/stm/pages.h
--- a/rpython/translator/stm/src_stm/stm/pages.h
+++ b/rpython/translator/stm/src_stm/stm/pages.h
@@ -39,7 +39,9 @@
 static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count);
 static void page_privatize(uintptr_t pagenum);
 static void page_reshare(uintptr_t pagenum);
+static void _page_do_reshare(long segnum, uintptr_t pagenum);
 
+/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */
 static void mutex_pages_lock(void);
 static void mutex_pages_unlock(void);
 static bool _has_mutex_pages(void) __attribute__((unused));
diff --git a/rpython/translator/stm/src_stm/stm/setup.c 
b/rpython/translator/stm/src_stm/stm/setup.c
--- a/rpython/translator/stm/src_stm/stm/setup.c
+++ b/rpython/translator/stm/src_stm/stm/setup.c
@@ -4,6 +4,17 @@
 #endif
 
 
+static char *setup_mmap(char *reason)
+{
+    char *result = mmap(NULL, TOTAL_MEMORY,
+                        PROT_READ | PROT_WRITE,
+                        MAP_PAGES_FLAGS, -1, 0);
+    if (result == MAP_FAILED)
+        stm_fatalerror("%s failed: %m", reason);
+
+    return result;
+}
+
 void stm_setup(void)
 {
     /* Check that some values are acceptable */
@@ -21,13 +32,9 @@
            (FIRST_READMARKER_PAGE * 4096UL));
     assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096);
 
-    stm_object_pages = mmap(NULL, TOTAL_MEMORY,
-                            PROT_READ | PROT_WRITE,
-                            MAP_PAGES_FLAGS, -1, 0);
-    if (stm_object_pages == MAP_FAILED)
-        stm_fatalerror("initial stm_object_pages mmap() failed: %m\n");
+    stm_object_pages = setup_mmap("initial stm_object_pages mmap()");
 
-    /* The segment 0 is not used to run transactions, but to contain the
+    /* The segment 0 is not used to run transactions, but contains the
        shared copy of the pages.  We mprotect all pages before so that
        accesses fail, up to and including the pages corresponding to the
        nurseries of the other segments. */
@@ -84,6 +91,7 @@
     setup_nursery();
     setup_gcpage();
     setup_pages();
+    setup_forksupport();
 }
 
 void stm_teardown(void)
@@ -111,11 +119,10 @@
     teardown_core();
     teardown_sync();
     teardown_gcpage();
-    teardown_nursery();
     teardown_pages();
 }
 
-void _init_shadow_stack(stm_thread_local_t *tl)
+static void _init_shadow_stack(stm_thread_local_t *tl)
 {
     struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)
         malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s));
@@ -124,13 +131,18 @@
     tl->shadowstack_base = s;
 }
 
-void _done_shadow_stack(stm_thread_local_t *tl)
+static void _done_shadow_stack(stm_thread_local_t *tl)
 {
     free(tl->shadowstack_base);
     tl->shadowstack = NULL;
     tl->shadowstack_base = NULL;
 }
 
+static pthread_t *_get_cpth(stm_thread_local_t *tl)
+{
+    assert(sizeof(pthread_t) <= sizeof(tl->creating_pthread));
+    return (pthread_t *)(tl->creating_pthread);
+}
 
 void stm_register_thread_local(stm_thread_local_t *tl)
 {
@@ -154,6 +166,7 @@
        numbers automatically. */
     num = (num % NB_SEGMENTS) + 1;
     tl->associated_segment_num = num;
+    *_get_cpth(tl) = pthread_self();
     _init_shadow_stack(tl);
     set_gs_register(get_segment_base(num));
     s_mutex_unlock();
@@ -162,6 +175,7 @@
 void stm_unregister_thread_local(stm_thread_local_t *tl)
 {
     s_mutex_lock();
+    assert(tl->prev != NULL);
     assert(tl->next != NULL);
     _done_shadow_stack(tl);
     if (tl == stm_all_thread_locals) {
diff --git a/rpython/translator/stm/src_stm/stm/sync.c 
b/rpython/translator/stm/src_stm/stm/sync.c
--- a/rpython/translator/stm/src_stm/stm/sync.c
+++ b/rpython/translator/stm/src_stm/stm/sync.c
@@ -41,24 +41,24 @@
 static void setup_sync(void)
 {
     if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0)
-        stm_fatalerror("mutex initialization: %m\n");
+        stm_fatalerror("mutex initialization: %m");
 
     long i;
     for (i = 0; i < _C_TOTAL; i++) {
         if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0)
-            stm_fatalerror("cond initialization: %m\n");
+            stm_fatalerror("cond initialization: %m");
     }
 }
 
 static void teardown_sync(void)
 {
     if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0)
-        stm_fatalerror("mutex destroy: %m\n");
+        stm_fatalerror("mutex destroy: %m");
 
     long i;
     for (i = 0; i < _C_TOTAL; i++) {
         if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0)
-            stm_fatalerror("cond destroy: %m\n");
+            stm_fatalerror("cond destroy: %m");
     }
 
     memset(&sync_ctl, 0, sizeof(sync_ctl));
@@ -75,14 +75,14 @@
 static void set_gs_register(char *value)
 {
     if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0))
-        stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n");
+        stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m");
 }
 
 static inline void s_mutex_lock(void)
 {
     assert(!_has_mutex_here);
     if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0))
-        stm_fatalerror("pthread_mutex_lock: %m\n");
+        stm_fatalerror("pthread_mutex_lock: %m");
     assert((_has_mutex_here = true, 1));
 }
 
@@ -90,32 +90,32 @@
 {
     assert(_has_mutex_here);
     if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0))
-        stm_fatalerror("pthread_mutex_unlock: %m\n");
+        stm_fatalerror("pthread_mutex_unlock: %m");
     assert((_has_mutex_here = false, 1));
 }
 
 static inline void cond_wait(enum cond_type_e ctype)
 {
 #ifdef STM_NO_COND_WAIT
-    stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype);
+    stm_fatalerror("*** cond_wait/%d called!", (int)ctype);
 #endif
 
     assert(_has_mutex_here);
     if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype],
                                    &sync_ctl.global_mutex) != 0))
-        stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype);
+        stm_fatalerror("pthread_cond_wait/%d: %m", (int)ctype);
 }
 
 static inline void cond_signal(enum cond_type_e ctype)
 {
     if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0))
-        stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype);
+        stm_fatalerror("pthread_cond_signal/%d: %m", (int)ctype);
 }
 
 static inline void cond_broadcast(enum cond_type_e ctype)
 {
     if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0))
-        stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype);
+        stm_fatalerror("pthread_cond_broadcast/%d: %m", (int)ctype);
 }
 
 /************************************************************/
diff --git a/rpython/translator/stm/src_stm/stmgc.c 
b/rpython/translator/stm/src_stm/stmgc.c
--- a/rpython/translator/stm/src_stm/stmgc.c
+++ b/rpython/translator/stm/src_stm/stmgc.c
@@ -24,6 +24,7 @@
 #include "stm/largemalloc.c"
 #include "stm/nursery.c"
 #include "stm/sync.c"
+#include "stm/forksupport.c"
 #include "stm/setup.c"
 #include "stm/hash_id.c"
 #include "stm/core.c"
diff --git a/rpython/translator/stm/src_stm/stmgc.h 
b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -70,6 +70,7 @@
     /* the next fields are handled internally by the library */
     int associated_segment_num;
     struct stm_thread_local_s *prev, *next;
+    void *creating_pthread[2];
 } stm_thread_local_t;
 
 /* this should use llvm's coldcc calling convention,
@@ -130,8 +131,10 @@
 
 /* ==================== PUBLIC API ==================== */
 
-/* Number of segments (i.e. how many threads can be executed in
-   parallel, in maximum).
+/* Number of segments (i.e. how many transactions can be executed in
+   parallel, in maximum).  If you try to start transactions in more
+   threads than the number of segments, it will block, waiting for the
+   next segment to become free.
 */
 #define STM_NB_SEGMENTS    4
 
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to