Author: Remi Meier <remi.me...@gmail.com>
Branch: 
Changeset: r544:8a3b7748ba7f
Date: 2013-11-05 22:04 +0100
http://bitbucket.org/pypy/stmgc/changeset/8a3b7748ba7f/

Log:    Fix impersonation of other threads when forcing minor collections in
        them. Also use the direct thread-locals in some places (for better
        or worse).
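
        The change revolves around the pairing of per-thread globals
        (stm_active, stm_nursery_current, stm_nursery_nextlimit, ...) with
        the *_ref pointers in struct tx_descriptor that point back at them,
        so that *d->active_ref == stm_active holds for the owning thread.
        A minimal standalone sketch of that pairing, with hypothetical
        underscore-suffixed names (GNU C, since the Makefile builds with
        gcc):

            #include <assert.h>

            static __thread int stm_active_;      /* stand-in for stm_active */

            struct tx_descriptor_ {
                int *active_ref;         /* points into the owner's locals */
            };

            static __thread struct tx_descriptor_ desc_;

            int main(void)
            {
                desc_.active_ref = &stm_active_;   /* the wiring done in et.c */
                stm_active_ = 1;
                assert(*desc_.active_ref == stm_active_);  /* same storage,
                                                              two access paths */
                return 0;
            }

        force_minor_collections() in gcpage.c below has to keep both access
        paths consistent while it temporarily impersonates another thread;
        that is what this commit fixes.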

diff --git a/c4/Makefile b/c4/Makefile
--- a/c4/Makefile
+++ b/c4/Makefile
@@ -31,7 +31,7 @@
 # gcc address sanitizer: -fPIE -pie -fsanitize=address -lasan -fno-omit-frame-pointer
 
 debug-%: %.c ${H_FILES} ${C_FILES}
-       gcc -pthread -DDUMP_EXTRA=1 ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt
+       gcc -Wall -pthread -DDUMP_EXTRA=1 ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt
 
 release-%: %.c ${H_FILES} ${C_FILES} stmgc.c
        gcc -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c -lrt
diff --git a/c4/et.c b/c4/et.c
--- a/c4/et.c
+++ b/c4/et.c
@@ -705,7 +705,7 @@
     }
 
   struct tx_descriptor *d = thread_descriptor;
-  assert(*d->active_ref >= 1);
+  assert(stm_active >= 1);
 
   /* We need the collection_lock for the sequel; this is required notably
      because we're about to edit flags on a protected object.
@@ -889,7 +889,7 @@
 void SpinLoop(int num)
 {
   struct tx_descriptor *d = thread_descriptor;
-  assert(*d->active_ref >= 1);
+  assert(stm_active >= 1);
   assert(num < SPINLOOP_REASONS);
   d->num_spinloops[num]++;
   smp_spinloop();
@@ -924,7 +924,7 @@
       assert(!stm_has_got_any_lock(d));
     }
 
-  assert(*d->active_ref != 0);
+  assert(stm_active != 0);
   assert(!is_inevitable(d));
   assert(num < ABORT_REASONS);
   d->num_aborts[num]++;
@@ -989,7 +989,7 @@
   SpinLoop(SPLP_ABORT);
 
   /* make the transaction no longer active */
-  *d->active_ref = 0;
+  stm_active = 0;
   d->atomic = 0;
 
   /* release the lock */
@@ -1043,10 +1043,10 @@
 void AbortNowIfDelayed(void)
 {
   struct tx_descriptor *d = thread_descriptor;
-  if (*d->active_ref < 0)
+  if (stm_active < 0)
     {
-      int reason = -*d->active_ref;
-      *d->active_ref = 1;
+      int reason = -stm_active;
+      stm_active = 1;
       AbortTransaction(reason);
     }
 }
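
The asserts touched in this file all rely on one encoding of stm_active: 0
outside a transaction, 1 active, 2 inevitable, and a negative value for a
delayed abort whose reason is the negated value, as AbortNowIfDelayed()
above shows. A hedged standalone sketch of that encoding (simplified,
hypothetical names, not the stmgc code itself):

    #include <assert.h>

    enum { NOT_IN_TRANSACTION = 0, ACTIVE = 1, INEVITABLE = 2 };

    /* negative values encode a pending abort: recover the reason,
       reset to a normally-running transaction, then abort */
    static void abort_now_if_delayed_(int *active)
    {
        if (*active < 0) {
            int reason = -*active;    /* recover the abort reason */
            *active = ACTIVE;
            (void)reason;             /* AbortTransaction(reason) in et.c */
        }
    }

    int main(void)
    {
        int active = -3;              /* a delayed abort, reason 3 */
        abort_now_if_delayed_(&active);
        assert(active == ACTIVE);
        return 0;
    }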
@@ -1098,7 +1098,7 @@
 {
   struct tx_descriptor *d = thread_descriptor;
   init_transaction(d, 0);
-  *d->active_ref = 1;
+  stm_active = 1;
   d->setjmp_buf = buf;
   d->longjmp_callback = longjmp_callback;
   d->old_thread_local_obj = stm_thread_local_obj;
@@ -1508,7 +1508,7 @@
 
   spinlock_release(d->public_descriptor->collection_lock);
   d->num_commits++;
-  *d->active_ref = 0;
+  stm_active = 0;
   if (!stay_inevitable)
     stm_stop_sharedlock();
 
@@ -1550,7 +1550,7 @@
 {   /* must save roots around this call */
   revision_t cur_time;
   struct tx_descriptor *d = thread_descriptor;
-  if (d == NULL || *d->active_ref != 1)
+  if (d == NULL || stm_active != 1)
     return;  /* I am already inevitable, or not in a transaction at all
                 (XXX statically we should know when we're outside
                 a transaction) */
@@ -1761,11 +1761,15 @@
       assert(d->my_lock & 1);
       assert(d->my_lock >= LOCKED);
       stm_private_rev_num = -d->my_lock;
+      /* Attention: in the following, we add references to real thread-locals
+         to the thread_descriptor. Make sure that force_minor_collections()
+         fakes all of them when doing minor collections in other threads! */
       d->active_ref = &stm_active;
       d->nursery_current_ref = &stm_nursery_current;
       d->nursery_nextlimit_ref = &stm_nursery_nextlimit;
       d->private_revision_ref = &stm_private_rev_num;
       d->read_barrier_cache_ref = &stm_read_barrier_cache;
+
       stm_thread_local_obj = NULL;
       d->thread_local_obj_ref = &stm_thread_local_obj;
       d->max_aborts = -1;
diff --git a/c4/extra.c b/c4/extra.c
--- a/c4/extra.c
+++ b/c4/extra.c
@@ -25,7 +25,7 @@
 void stm_call_on_abort(void *key, void callback(void *))
 {
     struct tx_descriptor *d = thread_descriptor;
-    if (d == NULL || *d->active_ref != 1)
+    if (d == NULL || stm_active != 1)
         return;   /* ignore callbacks if we're outside a transaction or
                      in an inevitable transaction (which cannot abort) */
     if (callback == NULL) {
diff --git a/c4/gcpage.c b/c4/gcpage.c
--- a/c4/gcpage.c
+++ b/c4/gcpage.c
@@ -919,9 +919,15 @@
     struct tx_descriptor *saved = thread_descriptor;
     revision_t saved_private_rev = stm_private_rev_num;
     char *saved_read_barrier_cache = stm_read_barrier_cache;
+    int saved_active = stm_active;
+    char *saved_nursery_current = stm_nursery_current;
+    char *saved_nursery_nextlimit = stm_nursery_nextlimit;
 
     assert(saved_private_rev == *saved->private_revision_ref);
     assert(saved_read_barrier_cache == *saved->read_barrier_cache_ref);
+    assert(saved_active == *saved->active_ref);
+    assert(saved_nursery_current == *saved->nursery_current_ref);
+    assert(saved_nursery_nextlimit == *saved->nursery_nextlimit_ref);
 
     for (d = stm_tx_head; d; d = d->tx_next) {
         /* Force a minor collection to run in the thread 'd'.
@@ -933,20 +939,49 @@
             /* Hack: temporarily pretend that we "are" the other thread...
              */
             assert(d->shadowstack_end_ref && *d->shadowstack_end_ref);
-            thread_descriptor = d;
-            stm_private_rev_num = *d->private_revision_ref;
+            /* set thread locals to expected values */
+            thread_descriptor      = d;
+            stm_private_rev_num    = *d->private_revision_ref;
             stm_read_barrier_cache = *d->read_barrier_cache_ref;
+            stm_active             = *d->active_ref;
+            stm_nursery_current    = *d->nursery_current_ref;
+            stm_nursery_nextlimit  = *d->nursery_nextlimit_ref;
+            /* save, then point _refs to the new thread-locals */
+            revision_t *d_private_revision_ref = d->private_revision_ref;
+            char **d_read_barrier_cache_ref    = d->read_barrier_cache_ref;
+            int   *d_active_ref                = d->active_ref;
+            char **d_nursery_current_ref       = d->nursery_current_ref;
+            char **d_nursery_nextlimit_ref     = d->nursery_nextlimit_ref;
+            d->private_revision_ref   = &stm_private_rev_num;
+            d->read_barrier_cache_ref = &stm_read_barrier_cache;
+            d->active_ref             = &stm_active;
+            d->nursery_current_ref    = &stm_nursery_current;
+            d->nursery_nextlimit_ref  = &stm_nursery_nextlimit;
 
+            /* we impersonated the other thread. */
             stmgc_minor_collect_no_abort();
 
-            assert(stm_private_rev_num == *d->private_revision_ref);
-            *d->read_barrier_cache_ref = stm_read_barrier_cache;
-
-            thread_descriptor = saved;
-            stm_private_rev_num = saved_private_rev;
-            stm_read_barrier_cache = saved_read_barrier_cache;
+            /* priv_rev didn't change! others may have */
+            assert(*d_private_revision_ref == stm_private_rev_num);
+            *d_read_barrier_cache_ref = stm_read_barrier_cache;
+            *d_active_ref             = stm_active;
+            *d_nursery_current_ref    = stm_nursery_current;
+            *d_nursery_nextlimit_ref  = stm_nursery_nextlimit;
+            /* restore _ref pointers in other thread */
+            d->private_revision_ref = d_private_revision_ref;
+            d->read_barrier_cache_ref = d_read_barrier_cache_ref;
+            d->active_ref = d_active_ref;
+            d->nursery_current_ref = d_nursery_current_ref;
+            d->nursery_nextlimit_ref = d_nursery_nextlimit_ref;
         }
     }
+    /* restore current thread */
+    thread_descriptor = saved;
+    stm_private_rev_num = saved_private_rev;
+    stm_read_barrier_cache = saved_read_barrier_cache;
+    stm_active = saved_active;
+    stm_nursery_current = saved_nursery_current;
+    stm_nursery_nextlimit = saved_nursery_nextlimit;
     stmgc_minor_collect_no_abort();
 }
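
This hunk is the actual fix: besides copying the other thread's values into
our own thread-locals, it also repoints the descriptor's *_ref fields at
them for the duration of the collection, then writes the results back and
restores everything. A minimal sketch of that save/redirect/restore pattern
with a single variable instead of five (hypothetical names; the real code
treats stm_private_rev_num, stm_read_barrier_cache, stm_active,
stm_nursery_current and stm_nursery_nextlimit the same way):

    #include <assert.h>

    static __thread int active_;          /* this thread's fast-path copy */

    struct desc_ { int *active_ref; };    /* shared; points at owner's copy */

    static void impersonate_and_collect_(struct desc_ *d)
    {
        int saved = active_;              /* 1. save our own thread-local */
        active_ = *d->active_ref;         /* 2. copy the other thread's value */
        int *old_ref = d->active_ref;     /* 3. repoint its ref at us, so */
        d->active_ref = &active_;         /*    both access paths agree */

        /* ... stmgc_minor_collect_no_abort() would run here ... */

        *old_ref = active_;               /* 4. write any change back */
        d->active_ref = old_ref;          /* 5. restore the other thread's ref */
        active_ = saved;                  /* 6. restore our own thread-local */
    }

    int main(void)
    {
        static int other_active = 1;      /* the "other" thread's local */
        struct desc_ d = { &other_active };
        active_ = 2;                      /* our own state */
        impersonate_and_collect_(&d);
        assert(active_ == 2 && d.active_ref == &other_active);
        return 0;
    }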
 
diff --git a/c4/nursery.c b/c4/nursery.c
--- a/c4/nursery.c
+++ b/c4/nursery.c
@@ -36,8 +36,8 @@
     assert(d->nursery_base == NULL);
     d->nursery_base = stm_malloc(GC_NURSERY);       /* start of nursery */
     d->nursery_end = d->nursery_base + GC_NURSERY;  /* end of nursery */
-    *d->nursery_current_ref = d->nursery_base;           /* current position */
-    *d->nursery_nextlimit_ref = d->nursery_base;         /* next section limit */
+    stm_nursery_current = d->nursery_base;           /* current position */
+    stm_nursery_nextlimit = d->nursery_base;         /* next section limit */
     d->nursery_cleared = NC_REGULAR;
 
     dprintf(("minor: nursery is at [%p to %p]\n", d->nursery_base,
@@ -63,7 +63,7 @@
 void stmgc_minor_collect_soon(void)
 {
     struct tx_descriptor *d = thread_descriptor;
-    *d->nursery_current_ref = d->nursery_end;
+    stm_nursery_current = d->nursery_end;
 }
 
 inline static gcptr allocate_nursery(size_t size, revision_t tid)
@@ -71,11 +71,11 @@
     /* if 'tid == -1', we must not collect */
     struct tx_descriptor *d = thread_descriptor;
     gcptr P;
-    char *cur = *d->nursery_current_ref;
+    char *cur = stm_nursery_current;
     char *end = cur + size;
     assert((size & 3) == 0);
-    *d->nursery_current_ref = end;
-    if (end > *d->nursery_nextlimit_ref) {
+    stm_nursery_current = end;
+    if (end > stm_nursery_nextlimit) {
         P = allocate_next_section(size, tid);
     }
     else {
@@ -592,7 +592,7 @@
        First fix 'nursery_current', left to a bogus value by the caller.
     */
     struct tx_descriptor *d = thread_descriptor;
-    *d->nursery_current_ref -= allocate_size;
+    stm_nursery_current -= allocate_size;
 
     /* Are we asking for a "reasonable" number of bytes, i.e. a value
        at most equal to one section?
@@ -612,8 +612,8 @@
     }
 
     /* Are we at the end of the nursery? */
-    if (*d->nursery_nextlimit_ref == d->nursery_end ||
-        *d->nursery_current_ref == d->nursery_end) {   // stmgc_minor_collect_soon()
+    if (stm_nursery_nextlimit == d->nursery_end ||
+        stm_nursery_current == d->nursery_end) {   // stmgc_minor_collect_soon()
         /* Yes */
         if (tid == -1)
             return NULL;    /* cannot collect */
@@ -629,12 +629,12 @@
 
     /* Clear the next section */
     if (d->nursery_cleared != NC_ALREADY_CLEARED)
-        memset(*d->nursery_nextlimit_ref, 0, GC_NURSERY_SECTION);
-    *d->nursery_nextlimit_ref += GC_NURSERY_SECTION;
+        memset(stm_nursery_nextlimit, 0, GC_NURSERY_SECTION);
+    stm_nursery_nextlimit += GC_NURSERY_SECTION;
 
     /* Return the object from there */
-    gcptr P = (gcptr)(*d->nursery_current_ref);
-    *d->nursery_current_ref += allocate_size;
+    gcptr P = (gcptr)(stm_nursery_current);
+    stm_nursery_current += allocate_size;
     assert(*d->nursery_current_ref <= *d->nursery_nextlimit_ref);
 
     P->h_tid = tid;
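
allocate_nursery() above is a bump-pointer allocator over a section-divided
nursery: the fast path is one add and one compare against
stm_nursery_nextlimit, and crossing the limit falls into
allocate_next_section(), which clears and hands out the next section or
triggers a minor collection at the nursery end. A self-contained sketch of
that shape (hypothetical names and sizes; no GC, so a full nursery just
returns NULL):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define NURSERY_SIZE_  (16 * 1024)
    #define SECTION_SIZE_  1024

    static __thread char *nursery_base_, *nursery_end_;
    static __thread char *nursery_current_, *nursery_nextlimit_;

    static void nursery_init_(void)
    {
        nursery_base_      = malloc(NURSERY_SIZE_);
        nursery_end_       = nursery_base_ + NURSERY_SIZE_;
        nursery_current_   = nursery_base_;
        nursery_nextlimit_ = nursery_base_;
    }

    static void *allocate_(size_t size)
    {
        assert((size & 3) == 0);            /* word-aligned sizes only */
        char *cur = nursery_current_;
        char *end = cur + size;
        if (end > nursery_nextlimit_) {     /* slow path */
            if (nursery_nextlimit_ == nursery_end_)
                return NULL;                /* full: would minor-collect */
            memset(nursery_nextlimit_, 0, SECTION_SIZE_);
            nursery_nextlimit_ += SECTION_SIZE_;
            return allocate_(size);         /* retry with the new limit */
        }
        nursery_current_ = end;             /* fast path: bump and go */
        return cur;
    }

    int main(void)
    {
        nursery_init_();
        assert(allocate_(64) != NULL);
        return 0;
    }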
diff --git a/c4/stmgc.h b/c4/stmgc.h
--- a/c4/stmgc.h
+++ b/c4/stmgc.h
@@ -135,7 +135,7 @@
 /* change the default transaction length, and ask if now would be a good
    time to break the transaction (by returning from the 'callback' above
    with a positive value). */
-void stm_set_transaction_length(long length_max);
+void stm_set_transaction_length(long length_max); /* save roots! */
 _Bool stm_should_break_transaction(void);
 
 /* change the atomic counter by 'delta' and return the new value.  Used
@@ -162,7 +162,7 @@
    stm_inspect_abort_info().  (XXX details not documented yet) */
 void stm_abort_info_push(gcptr obj, long fieldoffsets[]);
 void stm_abort_info_pop(long count);
-char *stm_inspect_abort_info(void);    /* turns inevitable */
+char *stm_inspect_abort_info(void);    /* turns inevitable, push roots! */
 
 /* mostly for debugging support */
 void stm_abort_and_retry(void);
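
The two comments added to this header ("save roots!" / "push roots!") both
flag calls that may reach a safe point and hence a collection, so callers
must keep GC-visible pointers on the shadow stack across them. A
self-contained mock of that discipline (the shadow stack and the object
motion are simulated here; in stmgc itself the pair is
stm_push_root()/stm_pop_root()):

    #include <assert.h>

    static void *shadow_stack_[16];
    static int   shadow_top_;

    static void  push_root_(void *p) { shadow_stack_[shadow_top_++] = p; }
    static void *pop_root_(void)     { return shadow_stack_[--shadow_top_]; }

    static int obj_a_, obj_b_;

    /* stands in for a call like stm_set_transaction_length(): may "move"
       objects, updating only the roots found on the shadow stack */
    static void may_collect_(void)
    {
        for (int i = 0; i < shadow_top_; i++)
            if (shadow_stack_[i] == &obj_a_)
                shadow_stack_[i] = &obj_b_;    /* object moved */
    }

    int main(void)
    {
        void *obj = &obj_a_;
        push_root_(obj);          /* save roots around the call! */
        may_collect_();
        obj = pop_root_();        /* reload: it may have moved */
        assert(obj == &obj_b_);
        return 0;
    }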
diff --git a/c4/stmsync.c b/c4/stmsync.c
--- a/c4/stmsync.c
+++ b/c4/stmsync.c
@@ -9,7 +9,7 @@
 static revision_t sync_required = 0;
 
 void stm_set_transaction_length(long length_max)
-{
+{                               /* save roots around this call! */
     BecomeInevitable("set_transaction_length");
     if (length_max <= 0) {
         length_max = 1;
@@ -42,7 +42,7 @@
                                    d->reads_size_limit_nonatomic));
     /* if is_inevitable(), reads_size_limit_nonatomic should be 0
        (and thus reads_size_limit too, if !d->atomic.) */
-    if (*d->active_ref == 2)
+    if (stm_active == 2)
         assert(d->reads_size_limit_nonatomic == 0);
 #endif
 
@@ -167,7 +167,7 @@
            has configured 'reads_size_limit_nonatomic' to a smaller value.
            When such a shortened transaction succeeds, the next one will
            see its length limit doubled, up to the maximum. */
-        if (counter == 0 && *d->active_ref != 2) {
+        if (counter == 0 && stm_active != 2) {
             unsigned long limit = d->reads_size_limit_nonatomic;
             if (limit != 0 && limit < (stm_regular_length_limit >> 1))
                 limit = (limit << 1) | 1;
@@ -182,7 +182,7 @@
             /* atomic transaction: a common case is that callback() returned
                even though we are atomic because we need a major GC.  For
               that case, release and reacquire the rw lock here. */
-            assert(*d->active_ref >= 1);
+            assert(stm_active >= 1);
             stm_possible_safe_point();
         }
 
@@ -217,7 +217,7 @@
 {   /* must save roots around this call */
     struct tx_descriptor *d = thread_descriptor;
     if (d->atomic) {
-        assert(*d->active_ref >= 1);
+        assert(stm_active >= 1);
         stm_possible_safe_point();
     }
     else {
@@ -266,7 +266,7 @@
 int stm_in_transaction(void)
 {
     struct tx_descriptor *d = thread_descriptor;
-    return d && *d->active_ref;
+    return d && stm_active;
 }
 
 /************************************************************/
@@ -336,7 +336,7 @@
 void stm_partial_commit_and_resume_other_threads(void)
 {                               /* push gc roots! */
     struct tx_descriptor *d = thread_descriptor;
-    assert(*d->active_ref == 2);
+    assert(stm_active == 2);
     int atomic = d->atomic;
 
     /* Give up atomicity during commit. This still works because
@@ -390,7 +390,7 @@
 
     /* Warning, may block waiting for rwlock_in_transaction while another
        thread runs a major GC */
-    assert(*thread_descriptor->active_ref);
+    assert(stm_active);
     assert(in_single_thread != thread_descriptor);
 
     stm_stop_sharedlock();
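
For the hunk above that grows the limit back: after an abort shortens a
transaction, each successful run doubles reads_size_limit_nonatomic again
until it reaches stm_regular_length_limit. A standalone illustration of the
sequence (the concrete values are made up; the real limits live in the
descriptor):

    #include <stdio.h>

    int main(void)
    {
        unsigned long regular_limit = 65535;  /* stm_regular_length_limit */
        unsigned long limit = 127;            /* shortened after an abort */
        while (limit != regular_limit) {
            if (limit != 0 && limit < (regular_limit >> 1))
                limit = (limit << 1) | 1;     /* double, stays all-ones */
            else
                limit = regular_limit;        /* cap at the maximum */
            printf("%lu\n", limit);           /* 255, 511, ..., 65535 */
        }
        return 0;
    }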