Author: Remi Meier <remi.me...@inf.ethz.ch>
Branch: stmgc-c8
Changeset: r75478:1f67666be256
Date: 2015-01-22 10:58 +0100
http://bitbucket.org/pypy/pypy/changeset/1f67666be256/

Log:    import stmgc-c8 5cfce5d61c50

diff too long, truncating to 2000 out of 8126 lines

diff --git a/rpython/translator/stm/import_stmgc.py b/rpython/translator/stm/import_stmgc.py
--- a/rpython/translator/stm/import_stmgc.py
+++ b/rpython/translator/stm/import_stmgc.py
@@ -15,7 +15,7 @@
         yield line
 
 def main(stmgc_dir):
-    stmgc_dir = py.path.local(stmgc_dir).join('c7')
+    stmgc_dir = py.path.local(stmgc_dir).join('c8')
     popen = subprocess.Popen(['hg', 'id', '-i'], cwd=str(stmgc_dir),
                              stdout=subprocess.PIPE)
     rev = popen.stdout.read().strip()
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-957947bc7ad9
+5cfce5d61c50
diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c
deleted file mode 100644
--- a/rpython/translator/stm/src_stm/stm/contention.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/* Imported by rpython/translator/stm/import_stmgc.py */
-#ifndef _STM_CORE_H_
-# error "must be compiled via stmgc.c"
-#endif
-
-
-/* Here are the possible kinds of contention:
-
-   STM_CONTENTION_WRITE_WRITE
-
-       A write-write contention occurs when we are running our
-       transaction and detect that we are about to write to an object
-       that another thread is also writing to.  This kind of
-       contention must be resolved before continuing.  This *must*
-       abort one of the two threads: the caller's thread is not at a
-       safe-point, so it cannot wait!
-
-       It is reported as a timing event with the following two markers:
-       the current thread (i.e. where the second-in-time write occurs);
-       and the other thread (from its 'modified_old_objects_markers',
-       where the first-in-time write occurred).
-
-   STM_CONTENTION_WRITE_READ
-
-       A write-read contention occurs when we are trying to commit: it
-       means that an object we wrote to was also read by another
-       transaction.  Even though it would seem obvious that we should
-       just abort the other thread and proceed in our commit, a more
-       subtle answer would be, in some cases, to wait for the other thread
-       to commit first.  It would commit having read the old value, and
-       then we can commit our change to it.
-
-       It is reported as a timing event with only one marker: the
-       older location of the write that was done by the current thread.
-
-    STM_CONTENTION_INEVITABLE
-
-       An inevitable contention occurs when we're trying to become
-       inevitable but another thread already is.  We can never abort the
-       other thread in this case, but we still have the choice to abort
-       ourselves or pause until the other thread commits.
-
-       It is reported with two markers, one for the current thread and
-       one for the other thread.  Each marker gives the location that
-       attempts to make the transaction inevitable.
-*/
-
-
-struct contmgr_s {
-    enum stm_event_e kind;
-    struct stm_priv_segment_info_s *other_pseg;
-    bool abort_other;
-    bool try_sleep;  // XXX add a way to timeout, but should handle repeated
-                     // calls to contention_management() to avoid re-sleeping
-                     // for the whole duration
-};
-
-
-/************************************************************/
-
-
-__attribute__((unused))
-static void cm_always_abort_myself(struct contmgr_s *cm)
-{
-    cm->abort_other = false;
-}
-
-__attribute__((unused))
-static void cm_always_abort_other(struct contmgr_s *cm)
-{
-    cm->abort_other = true;
-}
-
-__attribute__((unused))
-static void cm_abort_the_younger(struct contmgr_s *cm)
-{
-    if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) {
-        /* We started after the other thread.  Abort */
-        cm->abort_other = false;
-    }
-    else {
-        cm->abort_other = true;
-    }
-}
-
-__attribute__((unused))
-static void cm_always_wait_for_other_thread(struct contmgr_s *cm)
-{
-    /* we tried this contention management, but it seems to have
-       very bad cases: if thread 1 always reads an object in every
-       transaction, and thread 2 wants to write this object just
-       once, then thread 2 will pause when it tries to commit;
-       it will wait until thread 1 committed; but by the time
-       thread 2 resumes again, thread 1 has already started the
-       next transaction and read the object again.
-    */
-    cm_abort_the_younger(cm);
-    cm->try_sleep = true;
-}
-
-__attribute__((unused))
-static void cm_pause_if_younger(struct contmgr_s *cm)
-{
-    if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) {
-        /* We started after the other thread.  Pause */
-        cm->try_sleep = true;
-        cm->abort_other = false;
-    }
-    else {
-        cm->abort_other = true;
-    }
-}
-
-
-/************************************************************/
-
-
-static bool contention_management(uint8_t other_segment_num,
-                                  enum stm_event_e kind,
-                                  object_t *obj)
-{
-    assert(_has_mutex());
-    assert(other_segment_num != STM_SEGMENT->segment_num);
-
-    bool others_may_have_run = false;
-    if (must_abort())
-        abort_with_mutex();
-
-    /* Report the contention */
-    timing_contention(kind, other_segment_num, obj);
-
-    /* Who should abort here: this thread, or the other thread? */
-    struct contmgr_s contmgr;
-    contmgr.kind = kind;
-    contmgr.other_pseg = get_priv_segment(other_segment_num);
-    contmgr.abort_other = false;
-    contmgr.try_sleep = false;
-
-    /* Pick one contention management policy... could be made dynamically selectable */
-#ifdef STM_TESTS
-    cm_abort_the_younger(&contmgr);
-#else
-    cm_pause_if_younger(&contmgr);
-#endif
-
-    /* Fix the choices that turn out to be incorrect due to TS_INEVITABLE
-       or is_abort() */
-    if (is_abort(contmgr.other_pseg->pub.nursery_end)) {
-        contmgr.abort_other = true;
-        contmgr.try_sleep = false;
-    }
-    else if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
-        assert(contmgr.other_pseg->transaction_state != TS_INEVITABLE);
-        contmgr.abort_other = true;
-        contmgr.try_sleep = false;
-    }
-    else if (contmgr.other_pseg->transaction_state == TS_INEVITABLE) {
-        contmgr.abort_other = false;
-    }
-
-    /* Do one of three things here...
-     */
-    if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE &&
-        contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) {
-        others_may_have_run = true;
-        /* Sleep.
-
-           - Not for write-write contentions, because we're not at a
-             safe-point.
-
-           - To prevent loops of threads waiting for each other, use
-             a crude heuristic of never pausing for a thread that is
-             itself already paused here.
-        */
-        contmgr.other_pseg->signal_when_done = true;
-
-        /* tell the other to commit ASAP */
-        signal_other_to_commit_soon(contmgr.other_pseg);
-
-        dprintf(("pausing...\n"));
-
-        timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION);
-
-        cond_signal(C_AT_SAFE_POINT);
-        STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE;
-        cond_wait(C_TRANSACTION_DONE);
-        STM_PSEGMENT->safe_point = SP_RUNNING;
-        dprintf(("pausing done\n"));
-
-        timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
-
-        if (must_abort())
-            abort_with_mutex();
-    }
-
-    else if (!contmgr.abort_other) {
-        /* tell the other to commit ASAP, since it causes aborts */
-        signal_other_to_commit_soon(contmgr.other_pseg);
-
-        dprintf(("abort in contention: kind %d\n", kind));
-        abort_with_mutex();
-    }
-
-    else {
-        /* We have to signal the other thread to abort, and wait until
-           it does. */
-        contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT;
-
-        timing_event(STM_SEGMENT->running_thread,
-                     STM_ABORTING_OTHER_CONTENTION);
-
-        int sp = contmgr.other_pseg->safe_point;
-        switch (sp) {
-
-        case SP_RUNNING:
-            /* The other thread is running now, so as NSE_SIGABORT was
-               set in its 'nursery_end', it will soon enter a
-               mutex_lock() and thus abort.
-
-               In this case, we will wait until it broadcasts "I'm done
-               aborting".  Important: this is not a safe point of any
-               kind!  The shadowstack may not be correct here.  It
-               should not end in a deadlock, because the target thread
-               is, in principle, guaranteed to call abort_with_mutex()
-               very soon.  Just to be on the safe side, make it really
-               impossible for the target thread to later enter the same
-               cond_wait(C_ABORTED) (and thus wait, possibly for us,
-               ending in a deadlock): check again must_abort() first.
-            */
-            if (must_abort())
-                abort_with_mutex();
-
-            others_may_have_run = true;
-            dprintf(("contention: wait C_ABORTED...\n"));
-            cond_wait(C_ABORTED);
-            dprintf(("contention: done\n"));
-
-            if (must_abort())
-                abort_with_mutex();
-            break;
-
-        /* The other cases are where the other thread is at a
-           safe-point.  We wake it up by sending the correct signal.
-           We don't have to wait here: the other thread will not do
-           anything more than abort when it really wakes up later.
-        */
-        case SP_WAIT_FOR_C_REQUEST_REMOVED:
-            cond_broadcast(C_REQUEST_REMOVED);
-            break;
-
-        case SP_WAIT_FOR_C_AT_SAFE_POINT:
-            cond_broadcast(C_AT_SAFE_POINT);
-            break;
-
-        case SP_WAIT_FOR_C_TRANSACTION_DONE:
-            cond_broadcast(C_TRANSACTION_DONE);
-            break;
-
-#ifdef STM_TESTS
-        case SP_WAIT_FOR_OTHER_THREAD:
-            /* for tests: the other thread will abort as soon as
-               stm_stop_safe_point() is called */
-            break;
-#endif
-
-        default:
-            stm_fatalerror("unexpected other_pseg->safe_point: %d", sp);
-        }
-
-        if (is_aborting_now(other_segment_num)) {
-            /* The other thread is blocked in a safe-point with NSE_SIGABORT.
-               We don't have to wake it up right now, but we know it will
-               abort as soon as it wakes up.  We can safely force it to
-               reset its state now. */
-            dprintf(("killing data structures\n"));
-            abort_data_structures_from_segment_num(other_segment_num);
-        }
-        dprintf(("killed other thread\n"));
-
-        /* we should commit soon, we caused an abort */
-        //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num));
-        if (!STM_PSEGMENT->signalled_to_commit_soon) {
-            STM_PSEGMENT->signalled_to_commit_soon = true;
-            stmcb_commit_soon();
-        }
-    }
-    return others_may_have_run;
-}
-
-static void write_write_contention_management(uintptr_t lock_idx,
-                                              object_t *obj)
-{
-    s_mutex_lock();
-
-    uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx];
-    if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) {
-
-        uint8_t other_segment_num = prev_owner;
-        assert(get_priv_segment(other_segment_num)->write_lock_num ==
-               prev_owner);
-
-        contention_management(other_segment_num,
-                              STM_CONTENTION_WRITE_WRITE, obj);
-
-        /* now we return into _stm_write_slowpath() and will try again
-           to acquire the write lock on our object. */
-    }
-
-    s_mutex_unlock();
-}
-
-static bool write_read_contention_management(uint8_t other_segment_num,
-                                             object_t *obj)
-{
-    return contention_management(other_segment_num,
-                                 STM_CONTENTION_WRITE_READ, obj);
-}
-
-static void inevitable_contention_management(uint8_t other_segment_num)
-{
-    contention_management(other_segment_num,
-                          STM_CONTENTION_INEVITABLE, NULL);
-}
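
The removed cm_abort_the_younger() policy boils down to comparing logical start times. A minimal standalone sketch of that rule, using an invented toy_tx type rather than stmgc's real stm_priv_segment_info_s:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_tx { unsigned long start_time; };   /* hypothetical stand-in */

    /* returns true if the *other* transaction should abort */
    static bool abort_the_younger(const struct toy_tx *me,
                                  const struct toy_tx *other)
    {
        /* whoever started later (larger timestamp) loses the conflict */
        return me->start_time < other->start_time;
    }

    int main(void)
    {
        struct toy_tx a = { 10 }, b = { 20 };
        printf("%d\n", abort_the_younger(&a, &b));   /* 1: b is younger */
        printf("%d\n", abort_the_younger(&b, &a));   /* 0: we are younger */
        return 0;
    }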
diff --git a/rpython/translator/stm/src_stm/stm/contention.h b/rpython/translator/stm/src_stm/stm/contention.h
deleted file mode 100644
--- a/rpython/translator/stm/src_stm/stm/contention.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Imported by rpython/translator/stm/import_stmgc.py */
-
-static void write_write_contention_management(uintptr_t lock_idx,
-                                              object_t *obj);
-static bool write_read_contention_management(uint8_t other_segment_num,
-                                             object_t *obj);
-static void inevitable_contention_management(uint8_t other_segment_num);
-
-static inline bool is_abort(uintptr_t nursery_end) {
-    return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE
-            && nursery_end != NSE_SIGCOMMITSOON);
-}
-
-static inline bool is_aborting_now(uint8_t other_segment_num) {
-    return (is_abort(get_segment(other_segment_num)->nursery_end) &&
-            get_priv_segment(other_segment_num)->safe_point != SP_RUNNING);
-}
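
is_abort() works because 'nursery_end' doubles as the bump-allocation limit and as a signal word: storing a value at or below _STM_NSE_SIGNAL_MAX guarantees that the next allocation fast path fails, diverting the thread into a slow path that inspects the value. A standalone sketch with invented addresses and an invented signal constant:

    #include <stdint.h>
    #include <stdio.h>

    #define NSE_SIGABORT 1UL            /* hypothetical signal value */

    static uintptr_t nursery_current = 1000000;
    static uintptr_t nursery_end     = 1004096;

    static int try_fast_alloc(uintptr_t size)
    {
        if (nursery_current + size > nursery_end)
            return 0;          /* slow path: nursery full, or a signal */
        nursery_current += size;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", try_fast_alloc(64));  /* 1: fits */
        nursery_end = NSE_SIGABORT;          /* another thread signals us */
        printf("%d\n", try_fast_alloc(64));  /* 0: forced into slow path */
        return 0;
    }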
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -4,305 +4,628 @@
 #endif
 
 
-static void teardown_core(void)
+/* General helper: copies objects into our own segment, from some
+   source described by a range of 'struct stm_undo_s'.  Maybe later
+   we could specialize this function to avoid the checks in the
+   inner loop.
+*/
+static void import_objects(
+        int from_segnum,            /* or -1: from undo->backup,
+                                       or -2: from undo->backup if not modified */
+        uintptr_t pagenum,          /* or -1: "all accessible" */
+        struct stm_undo_s *undo,
+        struct stm_undo_s *end)
 {
-    memset(write_locks, 0, sizeof(write_locks));
+    char *src_segment_base = (from_segnum >= 0 ? get_segment_base(from_segnum)
+                                               : NULL);
+
+    assert(IMPLY(from_segnum >= 0, get_priv_segment(from_segnum)->modification_lock));
+    assert(STM_PSEGMENT->modification_lock);
+
+    DEBUG_EXPECT_SEGFAULT(false);
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        stm_char *oslice = ((stm_char *)obj) + SLICE_OFFSET(undo->slice);
+        uintptr_t current_page_num = ((uintptr_t)oslice) / 4096;
+
+        if (pagenum == -1) {
+            if (get_page_status_in(STM_SEGMENT->segment_num,
+                                   current_page_num) == PAGE_NO_ACCESS)
+                continue;
+        }
+        else {
+            if (current_page_num != pagenum)
+                continue;
+        }
+
+        if (from_segnum == -2 && _stm_was_read(obj) && (obj->stm_flags & GCFLAG_WB_EXECUTED)) {
+            /* called from stm_validate():
+                > if not was_read(), we certainly didn't modify
+                > if not WB_EXECUTED, we may have read from the obj in a different page but
+                  did not modify it (should not occur right now, but future proof!)
+               only the WB_EXECUTED alone is not enough, since we may have imported from a
+               segment's private page (which had the flag set) */
+            assert(IMPLY(_stm_was_read(obj), (obj->stm_flags & GCFLAG_WB_EXECUTED))); /* for now */
+            continue;           /* only copy unmodified */
+        }
+
+        /* XXX: if the next assert is always true, we should never get a segfault
+           in this function at all. So the DEBUG_EXPECT_SEGFAULT is correct. */
+        assert((get_page_status_in(STM_SEGMENT->segment_num,
+                                   current_page_num) != PAGE_NO_ACCESS));
+
+        dprintf(("import slice seg=%d obj=%p off=%lu sz=%d pg=%lu\n",
+                 from_segnum, obj, SLICE_OFFSET(undo->slice),
+                 SLICE_SIZE(undo->slice), current_page_num));
+        char *src, *dst;
+        if (src_segment_base != NULL)
+            src = REAL_ADDRESS(src_segment_base, oslice);
+        else
+            src = undo->backup;
+        dst = REAL_ADDRESS(STM_SEGMENT->segment_base, oslice);
+        memcpy(dst, src, SLICE_SIZE(undo->slice));
+
+        if (src_segment_base == NULL && SLICE_OFFSET(undo->slice) == 0) {
+            /* check that restored obj doesn't have WB_EXECUTED */
+            assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED));
+        }
+    }
+    DEBUG_EXPECT_SEGFAULT(true);
 }
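
import_objects() leans on SLICE_OFFSET/SLICE_SIZE/NEW_SLICE, which pack a slice's offset and size into one 64-bit word; since a slice never crosses a 4096-byte page, a small size field is enough. The bit split below is an assumption for illustration (the real macros are in stmgc's headers); what matters is that NEW_SLICE round-trips through the two accessors:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical packing: offset in the high 48 bits, size in the low 16 */
    #define SLICE_OFFSET(slice)     ((uintptr_t)((slice) >> 16))
    #define SLICE_SIZE(slice)       ((int)((slice) & 0xFFFF))
    #define NEW_SLICE(offset, size) ((((uint64_t)(offset)) << 16) | (uint64_t)(size))

    int main(void)
    {
        uint64_t s = NEW_SLICE(8192, 1024);   /* slice at offset 8192, 1 KiB */
        assert(SLICE_OFFSET(s) == 8192);
        assert(SLICE_SIZE(s) == 1024);
        printf("off=%lu sz=%d\n", (unsigned long)SLICE_OFFSET(s), SLICE_SIZE(s));
        return 0;
    }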
 
-#ifdef NDEBUG
-#define EVENTUALLY(condition)    /* nothing */
-#else
-#define EVENTUALLY(condition)                                   \
-    {                                                           \
-        if (!(condition)) {                                     \
-            acquire_privatization_lock();                       \
-            if (!(condition))                                   \
-                stm_fatalerror("fails: " #condition);           \
-            release_privatization_lock();                       \
-        }                                                       \
-    }
-#endif
 
-static void check_flag_write_barrier(object_t *obj)
+/* ############# signal handler ############# */
+
+static void copy_bk_objs_in_page_from(int from_segnum, uintptr_t pagenum,
+                                      bool only_if_not_modified)
 {
-    /* check that all copies of the object, apart from mine, have the
-       GCFLAG_WRITE_BARRIER.  (a bit messy because it's possible that we
-       read a page in the middle of privatization by another thread)
-    */
-#ifndef NDEBUG
-    long i;
-    struct object_s *o1;
-    for (i = 0; i <= NB_SEGMENTS; i++) {
-        if (i == STM_SEGMENT->segment_num)
-            continue;
-        o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj);
-        EVENTUALLY(o1->stm_flags & GCFLAG_WRITE_BARRIER);
-    }
-#endif
+    /* looks at all bk copies of objects overlapping page 'pagenum' and
+       copies the part in 'pagenum' back to the current segment */
+    dprintf(("copy_bk_objs_in_page_from(%d, %ld, %d)\n",
+             from_segnum, (long)pagenum, only_if_not_modified));
+
+    struct list_s *list = get_priv_segment(from_segnum)->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+
+    import_objects(only_if_not_modified ? -2 : -1,
+                   pagenum, undo, end);
 }
 
-__attribute__((always_inline))
-static void write_slowpath_overflow_obj(object_t *obj, bool mark_card)
+static void go_to_the_past(uintptr_t pagenum,
+                           struct stm_commit_log_entry_s *from,
+                           struct stm_commit_log_entry_s *to)
 {
-    /* An overflow object is an object from the same transaction, but
-       outside the nursery.  More precisely, it is no longer young,
-       i.e. it comes from before the most recent minor collection.
-    */
-    assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL);
+    assert(STM_PSEGMENT->modification_lock);
+    assert(from->rev_num >= to->rev_num);
+    /* walk BACKWARDS the commit log and update the page 'pagenum',
+       initially at revision 'from', until we reach the revision 'to'. */
 
-    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
-    if (!mark_card) {
-        /* The basic case, with no card marking.  We append the object
-           into 'objects_pointing_to_nursery', and remove the flag so
-           that the write_slowpath will not be called again until the
-           next minor collection. */
-        if (obj->stm_flags & GCFLAG_CARDS_SET) {
-            /* if we clear this flag, we also need to clear the cards */
-            _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num),
-                                obj, CARD_CLEAR, false);
-        }
-        obj->stm_flags &= ~(GCFLAG_WRITE_BARRIER | GCFLAG_CARDS_SET);
-        LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
-    }
-    else {
-        /* Card marking.  Don't remove GCFLAG_WRITE_BARRIER because we
-           need to come back to _stm_write_slowpath_card() for every
-           card to mark.  Add GCFLAG_CARDS_SET. */
-        assert(!(obj->stm_flags & GCFLAG_CARDS_SET));
-        obj->stm_flags |= GCFLAG_CARDS_SET;
-        assert(STM_PSEGMENT->old_objects_with_cards);
-        LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj);
+    /* XXXXXXX Recursive algo for now, fix this! */
+    if (from != to) {
+        struct stm_commit_log_entry_s *cl = to->next;
+        go_to_the_past(pagenum, from, cl);
+
+        struct stm_undo_s *undo = cl->written;
+        struct stm_undo_s *end = cl->written + cl->written_count;
+
+        import_objects(-1, pagenum, undo, end);
     }
 }
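
One hedged way to address the XXX above and make go_to_the_past() iterative, assuming the surrounding file's types, import_objects() and <stdlib.h>: collect the entries in (to, from] first, then replay their backup slices newest-to-oldest, which matches the order the recursion produces:

    static void go_to_the_past_iter(uintptr_t pagenum,
                                    struct stm_commit_log_entry_s *from,
                                    struct stm_commit_log_entry_s *to)
    {
        /* count the entries strictly newer than 'to', up to 'from' */
        size_t n = 0, i = 0;
        struct stm_commit_log_entry_s *cl;
        for (cl = to; cl != from; cl = cl->next)
            n++;

        /* collect them oldest-to-newest... */
        struct stm_commit_log_entry_s **entries =
            malloc(n * sizeof(struct stm_commit_log_entry_s *));
        for (cl = to; cl != from; cl = cl->next)
            entries[i++] = cl->next;

        /* ...and replay the backups newest-to-oldest */
        while (i--)
            import_objects(-1, pagenum, entries[i]->written,
                           entries[i]->written + entries[i]->written_count);
        free(entries);
    }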
 
-__attribute__((always_inline))
-static void write_slowpath_common(object_t *obj, bool mark_card)
+
+
+static void handle_segfault_in_page(uintptr_t pagenum)
 {
-    assert(_seems_to_be_running_transaction());
-    assert(!_is_young(obj));
-    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
+    /* assumes page 'pagenum' is ACCESS_NONE, privatizes it,
+       and validates to newest revision */
 
-    uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj);
+    dprintf(("handle_segfault_in_page(%lu), seg %d\n", pagenum, 
STM_SEGMENT->segment_num));
 
-    if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) {
-        assert(write_locks[base_lock_idx] == 0);
-        write_slowpath_overflow_obj(obj, mark_card);
+    /* XXX: bad, but no deadlocks: */
+    acquire_all_privatization_locks();
+
+    long i;
+    int my_segnum = STM_SEGMENT->segment_num;
+
+    assert(get_page_status_in(my_segnum, pagenum) == PAGE_NO_ACCESS);
+
+    /* find who has the most recent revision of our page */
+    int copy_from_segnum = -1;
+    uint64_t most_recent_rev = 0;
+    for (i = 1; i < NB_SEGMENTS; i++) {
+        if (i == my_segnum)
+            continue;
+
+        struct stm_commit_log_entry_s *log_entry;
+        log_entry = get_priv_segment(i)->last_commit_log_entry;
+        if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS
+            && (copy_from_segnum == -1 || log_entry->rev_num > most_recent_rev)) {
+            copy_from_segnum = i;
+            most_recent_rev = log_entry->rev_num;
+        }
+    }
+    OPT_ASSERT(copy_from_segnum != my_segnum);
+
+    /* make our page write-ready */
+    page_mark_accessible(my_segnum, pagenum);
+
+    if (copy_from_segnum == -1) {
+        /* this page is only accessible in the sharing segment so far (new
+           allocation). We can thus simply mark it accessible here. */
+        pagecopy(get_virtual_page(my_segnum, pagenum),
+                 get_virtual_page(0, pagenum));
+        release_all_privatization_locks();
         return;
     }
-    /* Else, it's an old object and we need to privatize it.
-       Do a read-barrier now.  Note that this must occur before the
-       safepoints that may be issued in write_write_contention_management().
-    */
-    stm_read(obj);
 
-    /* Take the segment's own lock number */
-    uint8_t lock_num = STM_PSEGMENT->write_lock_num;
+    /* before copying anything, acquire modification locks from our and
+       the other segment */
+    uint64_t to_lock = (1UL << copy_from_segnum) | (1UL << my_segnum);
+    acquire_modification_lock_set(to_lock);
+    pagecopy(get_virtual_page(my_segnum, pagenum),
+             get_virtual_page(copy_from_segnum, pagenum));
 
-    /* If CARDS_SET, we entered here at least once already, so we
-       already own the write_lock */
-    assert(IMPLY(obj->stm_flags & GCFLAG_CARDS_SET,
-                 write_locks[base_lock_idx] == lock_num));
+    /* if there were modifications in the page, revert them. */
+    copy_bk_objs_in_page_from(copy_from_segnum, pagenum, false);
 
-    /* XXX XXX XXX make the logic of write-locking objects optional! */
+    /* we need to go from 'src_version' to 'target_version'.  This
+       might need a walk into the past. */
+    struct stm_commit_log_entry_s *src_version, *target_version;
+    src_version = get_priv_segment(copy_from_segnum)->last_commit_log_entry;
+    target_version = STM_PSEGMENT->last_commit_log_entry;
 
-    /* claim the write-lock for this object.  In case we're running the
-       same transaction since a long while, the object can be already in
-       'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER,
-       not in 'objects_pointing_to_nursery').  We'll detect this case
-       by finding that we already own the write-lock. */
 
- retry:
-    if (write_locks[base_lock_idx] == 0) {
-        /* A lock to prevent reading garbage from
-           lookup_other_thread_recorded_marker() */
-        acquire_marker_lock(STM_SEGMENT->segment_base);
+    dprintf(("handle_segfault_in_page: rev %lu to rev %lu\n",
+             src_version->rev_num, target_version->rev_num));
+    /* adapt revision of page to our revision:
+       if our rev is higher than the page we copy from, everything
+       is fine as we never read/modified the page anyway
+     */
+    if (src_version->rev_num > target_version->rev_num)
+        go_to_the_past(pagenum, src_version, target_version);
 
-        if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[base_lock_idx],
-                                                   0, lock_num))) {
-            release_marker_lock(STM_SEGMENT->segment_base);
-            goto retry;
+    release_modification_lock_set(to_lock);
+    release_all_privatization_locks();
+}
+
+static void _signal_handler(int sig, siginfo_t *siginfo, void *context)
+{
+    assert(_stm_segfault_expected > 0);
+
+    int saved_errno = errno;
+    char *addr = siginfo->si_addr;
+    dprintf(("si_addr: %p\n", addr));
+    if (addr == NULL || addr < stm_object_pages ||
+        addr >= stm_object_pages+TOTAL_MEMORY) {
+        /* actual segfault, unrelated to stmgc */
+        fprintf(stderr, "Segmentation fault: accessing %p\n", addr);
+        abort();
+    }
+
+    int segnum = get_segment_of_linear_address(addr);
+    if (segnum != STM_SEGMENT->segment_num) {
+        fprintf(stderr, "Segmentation fault: accessing %p (seg %d) from"
+                " seg %d\n", addr, segnum, STM_SEGMENT->segment_num);
+        abort();
+    }
+    dprintf(("-> segment: %d\n", segnum));
+
+    char *seg_base = STM_SEGMENT->segment_base;
+    uintptr_t pagenum = ((char*)addr - seg_base) / 4096UL;
+    if (pagenum < END_NURSERY_PAGE) {
+        fprintf(stderr, "Segmentation fault: accessing %p (seg %d "
+                        "page %lu)\n", addr, segnum, pagenum);
+        abort();
+    }
+
+    DEBUG_EXPECT_SEGFAULT(false);
+    handle_segfault_in_page(pagenum);
+    DEBUG_EXPECT_SEGFAULT(true);
+
+    errno = saved_errno;
+    /* now return and retry */
+}
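
This handler is presumably installed with sigaction() and SA_SIGINFO somewhere in setup code that this truncated diff does not show. The pattern itself (map a page PROT_NONE, catch the fault, make the page accessible inside the handler, let the kernel retry the faulting instruction) can be demonstrated standalone:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    static char *page;

    static void on_segv(int sig, siginfo_t *si, void *ctx)
    {
        (void)sig; (void)ctx;
        if ((char *)si->si_addr < page || (char *)si->si_addr >= page + 4096)
            _exit(1);                   /* unrelated fault: give up */
        /* make the page accessible; the faulting access then retries */
        mprotect(page, 4096, PROT_READ | PROT_WRITE);
    }

    int main(void)
    {
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        act.sa_sigaction = on_segv;
        act.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &act, NULL);

        page = mmap(NULL, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        page[0] = 42;                   /* faults once, then succeeds */
        printf("page[0] = %d\n", page[0]);
        return 0;
    }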
+
+/* ############# commit log ############# */
+
+
+void _dbg_print_commit_log()
+{
+    struct stm_commit_log_entry_s *cl = &commit_log_root;
+
+    fprintf(stderr, "commit log:\n");
+    while ((cl = cl->next)) {
+        if (cl == INEV_RUNNING) {
+            fprintf(stderr, "  INEVITABLE\n");
+            return;
+        }
+        fprintf(stderr, "  entry at %p: seg %d, rev %lu\n", cl, cl->segment_num, cl->rev_num);
+        struct stm_undo_s *undo = cl->written;
+        struct stm_undo_s *end = undo + cl->written_count;
+        for (; undo < end; undo++) {
+            fprintf(stderr, "    obj %p, size %d, ofs %lu: ", undo->object,
+                    SLICE_SIZE(undo->slice), SLICE_OFFSET(undo->slice));
+            /* long i; */
+            /* for (i=0; i<SLICE_SIZE(undo->slice); i += 8) */
+            /*     fprintf(stderr, " 0x%016lx", *(long *)(undo->backup + i)); */
+            fprintf(stderr, "\n");
+        }
+    }
+}
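
The entries printed here form a singly-linked list rooted at commit_log_root. A hedged reconstruction of the entry layout, matching the (object, backup, slice) triples that list_append3() appends further down; field order and exact types are assumptions, the real definitions live in stmgc's headers:

    struct stm_undo_s {                /* one modified slice of one object */
        object_t *object;
        char *backup;                  /* malloc'ed copy of the old contents */
        uint64_t slice;                /* packed offset+size, see SLICE_* */
    };

    struct stm_commit_log_entry_s {
        struct stm_commit_log_entry_s *volatile next;  /* NULL, INEV_RUNNING,
                                                          or the next entry */
        int segment_num;
        uint64_t rev_num;              /* strictly increasing along the list */
        size_t written_count;
        struct stm_undo_s written[];   /* flexible array member */
    };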
+
+static void reset_modified_from_backup_copies(int segment_num);  /* forward */
+
+static bool _stm_validate()
+{
+    /* returns true if we reached a valid state, or false if
+       we need to abort now */
+    dprintf(("_stm_validate()\n"));
+    /* go from last known entry in commit log to the
+       most current one and apply all changes done
+       by other transactions. Abort if we have read one of
+       the committed objs. */
+    struct stm_commit_log_entry_s *first_cl = STM_PSEGMENT->last_commit_log_entry;
+    struct stm_commit_log_entry_s *next_cl, *last_cl, *cl;
+    int my_segnum = STM_SEGMENT->segment_num;
+    /* Don't check this 'cl'. This entry is already checked */
+
+    if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
+        //assert(first_cl->next == INEV_RUNNING);
+        /* the above assert may fail when running a major collection
+           while the commit of the inevitable transaction is in progress */
+        return true;
+    }
+
+    bool needs_abort = false;
+
+    while(1) {
+        /* retry IF: */
+        /* if at the time of "HERE" (s.b.) there happen to be
+           more commits (and bk copies) then it could be that
+           copy_bk_objs_in_page_from (s.b.) reads a bk copy that
+           is itself more recent than last_cl. This is fixed
+           by re-validating. */
+        first_cl = STM_PSEGMENT->last_commit_log_entry;
+        if (first_cl->next == NULL)
+            break;
+
+        if (first_cl->next == INEV_RUNNING) {
+            /* need to reach safe point if an INEV transaction
+               is waiting for us, otherwise deadlock */
+            break;
         }
 
-        dprintf_test(("write_slowpath %p -> mod_old\n", obj));
+        /* Find the set of segments we need to copy from and lock them: */
+        uint64_t segments_to_lock = 1UL << my_segnum;
+        cl = first_cl;
+        while ((next_cl = cl->next) != NULL) {
+            if (next_cl == INEV_RUNNING) {
+                /* only validate entries up to INEV */
+                break;
+            }
+            assert(next_cl->rev_num > cl->rev_num);
+            cl = next_cl;
 
-        /* Add the current marker, recording where we wrote to this object */
-        timing_record_write();
-
-        /* Change to this old object from this transaction.
-           Add it to the list 'modified_old_objects'. */
-        LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj);
-
-        release_marker_lock(STM_SEGMENT->segment_base);
-
-        /* We need to privatize the pages containing the object, if they
-           are still SHARED_PAGE.  The common case is that there is only
-           one page in total. */
-        uintptr_t first_page = ((uintptr_t)obj) / 4096UL;
-
-        /* If the object is in the uniform pages of small objects
-           (outside the nursery), then it fits into one page.  This is
-           the common case. Otherwise, we need to compute it based on
-           its location and size. */
-        if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) {
-            page_privatize(first_page);
-        }
-        else {
-            char *realobj;
-            size_t obj_size;
-            uintptr_t i, end_page;
-
-            /* get the size of the object */
-            realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-            obj_size = stmcb_size_rounded_up((struct object_s *)realobj);
-
-            /* get the last page containing data from the object */
-            end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL;
-
-            for (i = first_page; i <= end_page; i++) {
-                page_privatize(i);
+            if (cl->written_count) {
+                segments_to_lock |= (1UL << cl->segment_num);
             }
         }
+        last_cl = cl;
+
+        /* HERE */
+
+        acquire_privatization_lock(STM_SEGMENT->segment_num);
+        acquire_modification_lock_set(segments_to_lock);
+
+
+        /* import objects from first_cl to last_cl: */
+        if (first_cl != last_cl) {
+            uint64_t segment_really_copied_from = 0UL;
+
+            cl = first_cl;
+            while ((cl = cl->next) != NULL) {
+                if (!needs_abort) {
+                    struct stm_undo_s *undo = cl->written;
+                    struct stm_undo_s *end = cl->written + cl->written_count;
+                    for (; undo < end; undo++) {
+                        if (_stm_was_read(undo->object)) {
+                            /* first reset all modified objects from the backup
+                               copies as soon as the first conflict is detected;
+                               then we will proceed below to update our segment from
+                               the old (but unmodified) version to the newer version.
+                            */
+                            reset_modified_from_backup_copies(my_segnum);
+                            needs_abort = true;
+
+                            dprintf(("_stm_validate() failed for obj %p\n", undo->object));
+                            break;
+                        }
+                    }
+                }
+
+                if (cl->written_count) {
+                    struct stm_undo_s *undo = cl->written;
+                    struct stm_undo_s *end = cl->written + cl->written_count;
+
+                    segment_really_copied_from |= (1UL << cl->segment_num);
+
+                    import_objects(cl->segment_num, -1, undo, end);
+
+                    /* here we can actually have our own modified version, so
+                       make sure to only copy things that are not modified in our
+                       segment... (if we do not abort) */
+                    copy_bk_objs_in_page_from
+                        (cl->segment_num, -1,     /* any page */
+                         !needs_abort);  /* if we abort, we still want to copy everything */
+                }
+
+                /* last fully validated entry */
+                STM_PSEGMENT->last_commit_log_entry = cl;
+                if (cl == last_cl)
+                    break;
+            }
+            assert(cl == last_cl);
+
+            /* XXX: this optimization fails in test_basic.py, bug3 */
+            /* OPT_ASSERT(segment_really_copied_from < (1 << NB_SEGMENTS)); */
+            /* int segnum; */
+            /* for (segnum = 1; segnum < NB_SEGMENTS; segnum++) { */
+            /*     if (segment_really_copied_from & (1UL << segnum)) { */
+            /*         /\* here we can actually have our own modified version, 
so */
+            /*            make sure to only copy things that are not modified 
in our */
+            /*            segment... (if we do not abort) *\/ */
+            /*         copy_bk_objs_in_page_from( */
+            /*             segnum, -1,     /\* any page *\/ */
+            /*             !needs_abort);  /\* if we abort, we still want to 
copy everything *\/ */
+            /*     } */
+            /* } */
+        }
+
+        /* done with modifications */
+        release_modification_lock_set(segments_to_lock);
+        release_privatization_lock(STM_SEGMENT->segment_num);
     }
-    else if (write_locks[base_lock_idx] == lock_num) {
-#ifdef STM_TESTS
-        bool found = false;
-        LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *,
-                       ({ if (item == obj) { found = true; break; } }));
-        assert(found);
+
+    return !needs_abort;
+}
+
+static struct stm_commit_log_entry_s *_create_commit_log_entry(void)
+{
+    /* puts all modified_old_objects in a new commit log entry */
+
+    // we don't need the privatization lock, as we are only
+    // reading from modified_old_objs and nobody but us can change it
+    struct list_s *list = STM_PSEGMENT->modified_old_objects;
+    OPT_ASSERT((list_count(list) % 3) == 0);
+    size_t count = list_count(list) / 3;
+    size_t byte_len = sizeof(struct stm_commit_log_entry_s) +
+        count * sizeof(struct stm_undo_s);
+    struct stm_commit_log_entry_s *result = malloc(byte_len);
+
+    result->next = NULL;
+    result->segment_num = STM_SEGMENT->segment_num;
+    result->rev_num = -1;       /* invalid */
+    result->written_count = count;
+    memcpy(result->written, list->items, count * sizeof(struct stm_undo_s));
+    return result;
+}
+
+
+static void reset_wb_executed_flags(void);
+static void readd_wb_executed_flags(void);
+static void check_all_write_barrier_flags(char *segbase, struct list_s *list);
+
+static void _validate_and_attach(struct stm_commit_log_entry_s *new)
+{
+    struct stm_commit_log_entry_s *old;
+
+    OPT_ASSERT(new != NULL);
+    /* we are attaching a real CL entry: */
+    bool is_commit = new != INEV_RUNNING;
+
+    while (1) {
+        if (!_stm_validate()) {
+            if (new != INEV_RUNNING)
+                free(new);
+            stm_abort_transaction();
+        }
+
+#if STM_TESTS
+        if (STM_PSEGMENT->transaction_state != TS_INEVITABLE
+            && STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) {
+            /* abort for tests... */
+            stm_abort_transaction();
+        }
 #endif
+
+        if (is_commit) {
+            /* we must not remove the WB_EXECUTED flags before validation as
+               they are part of a condition in import_objects() called by
+               copy_bk_objs_in_page_from to not overwrite our modifications.
+               So we do it here: */
+            reset_wb_executed_flags();
+            check_all_write_barrier_flags(STM_SEGMENT->segment_base,
+                                          STM_PSEGMENT->modified_old_objects);
+        }
+
+        /* try to attach to commit log: */
+        old = STM_PSEGMENT->last_commit_log_entry;
+        if (old->next == NULL) {
+            if (new != INEV_RUNNING) /* INEVITABLE */
+                new->rev_num = old->rev_num + 1;
+
+            if (__sync_bool_compare_and_swap(&old->next, NULL, new))
+                break;   /* success! */
+        } else if (old->next == INEV_RUNNING) {
+            /* we failed because there is an INEV transaction running */
+            usleep(10);
+        }
+
+        if (is_commit) {
+            /* XXX: unfortunately, if we failed to attach our CL entry,
+               we have to re-add the WB_EXECUTED flags before we try to
+               validate again because of said condition (s.a) */
+            readd_wb_executed_flags();
+        }
+
+        dprintf(("_validate_and_attach(%p) failed, enter safepoint\n", new));
+
+        /* check for a requested safe point. Otherwise, an INEV transaction
+           may try to commit but cannot because of the busy-loop here. */
+        _stm_collectable_safe_point();
+    }
+}
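
The attach step above is a lock-free list append: read the tail's 'next' and compare-and-swap it from NULL to the new entry, redoing the validation on failure. A minimal standalone model of just the append, using the same GCC builtin:

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; int payload; };

    /* attach 'n' at the end of the chain starting at 'head' */
    static void append(struct node *head, struct node *n)
    {
        struct node *cur = head;
        while (1) {
            while (cur->next != NULL)
                cur = cur->next;        /* find the current tail */
            if (__sync_bool_compare_and_swap(&cur->next, NULL, n))
                return;                 /* success: we own the link */
            /* another thread appended first: walk on and retry */
        }
    }

    int main(void)
    {
        struct node root = { NULL, 0 }, a = { NULL, 1 };
        append(&root, &a);
        printf("%d\n", root.next->payload);   /* 1 */
        return 0;
    }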
+
+static void _validate_and_turn_inevitable(void)
+{
+    _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING);
+}
+
+static void _validate_and_add_to_commit_log(void)
+{
+    struct stm_commit_log_entry_s *old, *new;
+
+    new = _create_commit_log_entry();
+    if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
+        old = STM_PSEGMENT->last_commit_log_entry;
+        new->rev_num = old->rev_num + 1;
+        OPT_ASSERT(old->next == INEV_RUNNING);
+
+        /* WB_EXECUTED must be removed before we attach */
+        reset_wb_executed_flags();
+        check_all_write_barrier_flags(STM_SEGMENT->segment_base,
+                                      STM_PSEGMENT->modified_old_objects);
+
+        bool yes = __sync_bool_compare_and_swap(&old->next, INEV_RUNNING, new);
+        OPT_ASSERT(yes);
     }
     else {
-        /* call the contention manager, and then retry (unless we were
-           aborted). */
-        write_write_contention_management(base_lock_idx, obj);
-        goto retry;
+        _validate_and_attach(new);
     }
 
+    acquire_modification_lock(STM_SEGMENT->segment_num);
+    list_clear(STM_PSEGMENT->modified_old_objects);
+    STM_PSEGMENT->last_commit_log_entry = new;
+    release_modification_lock(STM_SEGMENT->segment_num);
+}
 
-    /* check that we really have a private page */
-    assert(is_private_page(STM_SEGMENT->segment_num,
-                           ((uintptr_t)obj) / 4096));
+/* ############# STM ############# */
+void stm_validate()
+{
+    if (!_stm_validate())
+        stm_abort_transaction();
+}
 
-    /* check that so far all copies of the object have the flag */
-    check_flag_write_barrier(obj);
-
-    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
-    if (!mark_card) {
-        /* A common case for write_locks[] that was either 0 or lock_num:
-           we need to add the object to the appropriate list if there is one.
-        */
-        if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) {
-            dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj));
-            LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
-        }
-
-        if (obj->stm_flags & GCFLAG_CARDS_SET) {
-            /* if we clear this flag, we have to tell sync_old_objs that
-               everything needs to be synced */
-            _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num),
-                                obj, CARD_MARKED_OLD, true); /* mark all */
-        }
-
-        /* remove GCFLAG_WRITE_BARRIER if we succeeded in getting the base
-           write-lock (not for card marking). */
-        obj->stm_flags &= ~(GCFLAG_WRITE_BARRIER | GCFLAG_CARDS_SET);
-    }
-    else {
-        /* don't remove WRITE_BARRIER, but add CARDS_SET */
-        obj->stm_flags |= GCFLAG_CARDS_SET;
-        assert(STM_PSEGMENT->old_objects_with_cards);
-        LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj);
-    }
-
-    /* for sanity, check again that all other segment copies of this
-       object still have the flag (so privatization worked) */
-    check_flag_write_barrier(obj);
-}
 
 void _stm_write_slowpath(object_t *obj)
 {
-    write_slowpath_common(obj, /*mark_card=*/false);
-}
+    assert(_seems_to_be_running_transaction());
+    assert(!_is_in_nursery(obj));
+    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
 
-static bool obj_should_use_cards(object_t *obj)
-{
-    struct object_s *realobj = (struct object_s *)
-        REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-    long supports = stmcb_obj_supports_cards(realobj);
-    if (!supports)
-        return 0;
+    int my_segnum = STM_SEGMENT->segment_num;
+    uintptr_t end_page, first_page = ((uintptr_t)obj) / 4096UL;
+    char *realobj;
+    size_t obj_size;
 
-    /* check also if it makes sense: */
-    size_t size = stmcb_size_rounded_up(realobj);
-    return (size >= _STM_MIN_CARD_OBJ_SIZE);
-}
-
-char _stm_write_slowpath_card_extra(object_t *obj)
-{
-    /* the PyPy JIT calls this function directly if it finds that an
-       array doesn't have the GCFLAG_CARDS_SET */
-    bool mark_card = obj_should_use_cards(obj);
-    write_slowpath_common(obj, mark_card);
-    return mark_card;
-}
-
-long _stm_write_slowpath_card_extra_base(void)
-{
-    /* for the PyPy JIT: _stm_write_slowpath_card_extra_base[obj >> 4]
-       is the byte that must be set to CARD_MARKED.  The logic below
-       does the same, but more explicitly. */
-    return (((long)write_locks) - WRITELOCK_START + 1)
-        + 0x4000000000000000L;   // <- workaround for a clang bug :-(
-}
-
-void _stm_write_slowpath_card(object_t *obj, uintptr_t index)
-{
-    /* If CARDS_SET is not set so far, issue a normal write barrier.
-       If the object is large enough, ask it to set up the object for
-       card marking instead.
-    */
-    if (!(obj->stm_flags & GCFLAG_CARDS_SET)) {
-        char mark_card = _stm_write_slowpath_card_extra(obj);
-        if (!mark_card)
-            return;
+    realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
+    obj_size = stmcb_size_rounded_up((struct object_s *)realobj);
+    /* get the last page containing data from the object */
+    if (LIKELY(is_small_uniform(obj))) {
+        end_page = first_page;
+    } else {
+        end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL;
     }
 
-    dprintf_test(("write_slowpath_card %p -> index:%lu\n",
-                  obj, index));
+    /* add to read set: */
+    stm_read(obj);
 
-    /* We reach this point if we have to mark the card.
-     */
-    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
-    assert(obj->stm_flags & GCFLAG_CARDS_SET);
-    assert(!(obj->stm_flags & GCFLAG_SMALL_UNIFORM)); /* not supported/tested */
+    if (obj->stm_flags & GCFLAG_WB_EXECUTED) {
+        /* already executed WB once in this transaction. do GC
+           part again: */
+        dprintf(("write_slowpath-fast(%p)\n", obj));
+        obj->stm_flags &= ~GCFLAG_WRITE_BARRIER;
+        LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
+        return;
+    }
 
-#ifndef NDEBUG
-    struct object_s *realobj = (struct object_s *)
-        REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-    size_t size = stmcb_size_rounded_up(realobj);
-    /* we need at least one lock in addition to the STM-reserved object
-       write-lock */
-    assert(size >= 32);
-    /* the 'index' must be in range(length-of-obj), but we don't have
-       a direct way to know the length.  We know that it is smaller
-       than the size in bytes. */
-    assert(index < size);
-#endif
+    assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED));
+    dprintf(("write_slowpath(%p): sz=%lu\n", obj, obj_size));
 
-    /* Write into the card's lock.  This is used by the next minor
-       collection to know what parts of the big object may have changed.
-       We already own the object here or it is an overflow obj. */
-    uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj);
-    uintptr_t card_lock_idx = base_lock_idx + get_index_to_card_index(index);
-    write_locks[card_lock_idx] = CARD_MARKED;
+ retry:
+    /* privatize pages: */
+    /* XXX don't always acquire all locks... */
+    acquire_all_privatization_locks();
 
-    /* More debug checks */
-    dprintf(("mark %p index %lu, card:%lu with %d\n",
-             obj, index, get_index_to_card_index(index), CARD_MARKED));
-    assert(IMPLY(IS_OVERFLOW_OBJ(STM_PSEGMENT, obj),
-                 write_locks[base_lock_idx] == 0));
-    assert(IMPLY(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj),
-                 write_locks[base_lock_idx] == STM_PSEGMENT->write_lock_num));
+    uintptr_t page;
+    for (page = first_page; page <= end_page; page++) {
+        if (get_page_status_in(my_segnum, page) == PAGE_NO_ACCESS) {
+            /* XXX: slow? */
+            release_all_privatization_locks();
+
+            volatile char *dummy = REAL_ADDRESS(STM_SEGMENT->segment_base, page * 4096UL);
+            *dummy;            /* force segfault */
+
+            goto retry;
+        }
+    }
+    /* all pages are private to us and we hold the privatization_locks so
+       we are allowed to modify them */
+
+    /* phew, now add the obj to the write-set and register the
+       backup copy. */
+    /* XXX: we should not be here at all fiddling with page status
+       if 'obj' is merely an overflow object.  FIX ME, likely by copying
+       the overflow number logic from c7. */
+
+    DEBUG_EXPECT_SEGFAULT(false);
+
+    acquire_modification_lock(STM_SEGMENT->segment_num);
+    uintptr_t slice_sz;
+    uintptr_t in_page_offset = (uintptr_t)obj % 4096UL;
+    uintptr_t remaining_obj_sz = obj_size;
+    for (page = first_page; page <= end_page; page++) {
+        /* XXX Maybe also use mprotect() again to mark pages of the object as read-only, and
+           only stick it into modified_old_objects page-by-page?  Maybe it's
+           possible to do card-marking that way, too. */
+        OPT_ASSERT(remaining_obj_sz);
+
+        slice_sz = remaining_obj_sz;
+        if (in_page_offset + slice_sz > 4096UL) {
+            /* don't cross the page boundary */
+            slice_sz = 4096UL - in_page_offset;
+        }
+
+        size_t slice_off = obj_size - remaining_obj_sz;
+
+        /* make backup slice: */
+        char *bk_slice = malloc(slice_sz);
+        memcpy(bk_slice, realobj + slice_off, slice_sz);
+
+        STM_PSEGMENT->modified_old_objects = list_append3(
+            STM_PSEGMENT->modified_old_objects,
+            (uintptr_t)obj,     /* obj */
+            (uintptr_t)bk_slice,  /* bk_addr */
+            NEW_SLICE(slice_off, slice_sz));
+
+        remaining_obj_sz -= slice_sz;
+        in_page_offset = (in_page_offset + slice_sz) % 4096UL; /* mostly 0 */
+    }
+    OPT_ASSERT(remaining_obj_sz == 0);
+
+    /* remove the WRITE_BARRIER flag and add WB_EXECUTED */
+    obj->stm_flags &= ~GCFLAG_WRITE_BARRIER;
+    obj->stm_flags |= GCFLAG_WB_EXECUTED;
+
+    DEBUG_EXPECT_SEGFAULT(true);
+
+    release_modification_lock(STM_SEGMENT->segment_num);
+    /* done fiddling with protection and privatization */
+    release_all_privatization_locks();
+
+    /* also add it to the GC list for minor collections */
+    LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
 }
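
The backup loop above cuts the object into page-bounded slices. The slice arithmetic can be checked in isolation; a standalone sketch with invented constants, printing the (offset, size) pairs for an object that straddles two page boundaries:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t obj_addr = 4096UL * 10 + 4000;  /* 96 bytes before a boundary */
        uintptr_t obj_size = 5000;

        uintptr_t in_page_offset = obj_addr % 4096UL;
        uintptr_t remaining = obj_size;
        while (remaining) {
            uintptr_t slice_sz = remaining;
            if (in_page_offset + slice_sz > 4096UL)
                slice_sz = 4096UL - in_page_offset;   /* clamp at the boundary */
            printf("slice: off=%lu sz=%lu\n",
                   (unsigned long)(obj_size - remaining),
                   (unsigned long)slice_sz);          /* 96, 4096, 808 */
            remaining -= slice_sz;
            in_page_offset = (in_page_offset + slice_sz) % 4096UL;
        }
        return 0;
    }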
 
 static void reset_transaction_read_version(void)
@@ -315,7 +638,7 @@
              (long)(NB_READMARKER_PAGES * 4096UL)));
     if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL,
              PROT_READ | PROT_WRITE,
-             MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) {
+             MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0) != readmarkers) {
         /* fall-back */
 #if STM_TESTS
         stm_fatalerror("reset_transaction_read_version: %m");
@@ -325,24 +648,46 @@
     STM_SEGMENT->transaction_read_version = 1;
 }
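
reset_transaction_read_version() wipes the read markers by re-mmap()ing their pages because the markers are single bytes compared against the segment's transaction_read_version: once the 8-bit version wraps (see the old_rv == 0xff check further down), markers written 255 transactions ago would look current again. A toy model with one marker byte per object (the real per-address granularity differs):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define N_OBJS 4
    static uint8_t read_markers[N_OBJS];
    static uint8_t read_version = 0;

    static void start_transaction(void)
    {
        if (read_version == 0xff) {     /* would wrap: wipe all markers */
            memset(read_markers, 0, sizeof(read_markers));
            read_version = 0;
        }
        read_version++;
    }

    static void record_read(int i) { read_markers[i] = read_version; }
    static int  was_read(int i)    { return read_markers[i] == read_version; }

    int main(void)
    {
        start_transaction();
        record_read(2);
        printf("%d %d\n", was_read(2), was_read(3));  /* 1 0 */
        start_transaction();            /* new transaction: markers stale */
        printf("%d\n", was_read(2));    /* 0 */
        return 0;
    }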
 
-static uint64_t _global_start_time = 0;
+static void reset_wb_executed_flags(void)
+{
+    dprintf(("reset_wb_executed_flags()\n"));
+    struct list_s *list = STM_PSEGMENT->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        obj->stm_flags &= ~GCFLAG_WB_EXECUTED;
+    }
+}
+
+static void readd_wb_executed_flags(void)
+{
+    dprintf(("readd_wb_executed_flags()\n"));
+    struct list_s *list = STM_PSEGMENT->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        obj->stm_flags |= GCFLAG_WB_EXECUTED;
+    }
+}
+
+
+
 
 static void _stm_start_transaction(stm_thread_local_t *tl)
 {
     assert(!_stm_in_transaction(tl));
 
-    while (!acquire_thread_segment(tl))
-        ;
+    while (!acquire_thread_segment(tl)) {}
     /* GS invalid before this point! */
 
     assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION);
     assert(STM_PSEGMENT->transaction_state == TS_NONE);
-    timing_event(tl, STM_TRANSACTION_START);
-    STM_PSEGMENT->start_time = _global_start_time++;
-    STM_PSEGMENT->signalled_to_commit_soon = false;
+    STM_PSEGMENT->transaction_state = TS_REGULAR;
     STM_PSEGMENT->safe_point = SP_RUNNING;
-    STM_PSEGMENT->marker_inev.object = NULL;
-    STM_PSEGMENT->transaction_state = TS_REGULAR;
 #ifndef NDEBUG
     STM_PSEGMENT->running_pthread = pthread_self();
 #endif
@@ -350,16 +695,10 @@
     STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj;
 
     enter_safe_point_if_requested();
-    dprintf(("start_transaction\n"));
+    dprintf(("> start_transaction\n"));
 
-    s_mutex_unlock();
+    s_mutex_unlock();   // XXX it's probably possible to not acquire this here
 
-    /* Now running the SP_RUNNING start.  We can set our
-       'transaction_read_version' after releasing the mutex,
-       because it is only read by a concurrent thread in
-       stm_commit_transaction(), which waits until SP_RUNNING
-       threads are paused.
-    */
     uint8_t old_rv = STM_SEGMENT->transaction_read_version;
     STM_SEGMENT->transaction_read_version = old_rv + 1;
     if (UNLIKELY(old_rv == 0xff)) {
@@ -367,22 +706,17 @@
     }
 
     assert(list_is_empty(STM_PSEGMENT->modified_old_objects));
-    assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers));
+    assert(list_is_empty(STM_PSEGMENT->new_objects));
+    assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery));
     assert(list_is_empty(STM_PSEGMENT->young_weakrefs));
     assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery));
     assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows));
     assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0]));
     assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
-    assert(list_is_empty(STM_PSEGMENT->young_objects_with_light_finalizers));
-    assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL);
-    assert(STM_PSEGMENT->large_overflow_objects == NULL);
-    assert(STM_PSEGMENT->finalizers == NULL);
-#ifndef NDEBUG
-    /* this should not be used when objects_pointing_to_nursery == NULL */
-    STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L;
-#endif
 
     check_nursery_at_transaction_start();
+
+    stm_validate();
 }
 
 long stm_start_transaction(stm_thread_local_t *tl)
@@ -399,553 +733,142 @@
 
 void stm_start_inevitable_transaction(stm_thread_local_t *tl)
 {
-    /* used to be more efficient, starting directly an inevitable transaction,
-       but there is no real point any more, I believe */
-    rewind_jmp_buf rjbuf;
-    stm_rewind_jmp_enterframe(tl, &rjbuf);
-
-    stm_start_transaction(tl);
-    stm_become_inevitable(tl, "start_inevitable_transaction");
-
-    stm_rewind_jmp_leaveframe(tl, &rjbuf);
+    s_mutex_lock();
+    _stm_start_transaction(tl);
+    _stm_become_inevitable("stm_start_inevitable_transaction");
 }
 
+#ifdef STM_NO_AUTOMATIC_SETJMP
+void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn));
+int stm_is_inevitable(void)
+{
+    switch (STM_PSEGMENT->transaction_state) {
+    case TS_REGULAR: return 0;
+    case TS_INEVITABLE: return 1;
+    default: abort();
+    }
+}
+#endif
 
 /************************************************************/
 
+static void _finish_transaction()
+{
+    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
 
-static bool detect_write_read_conflicts(void)
-{
-    /* Detect conflicts of the form: we want to commit a write to an object,
-       but the same object was also read in a different thread.
-    */
-    long i;
-    for (i = 1; i <= NB_SEGMENTS; i++) {
-
-        if (i == STM_SEGMENT->segment_num)
-            continue;
-
-        if (get_priv_segment(i)->transaction_state == TS_NONE)
-            continue;    /* no need to check */
-
-        if (is_aborting_now(i))
-            continue;    /* no need to check: is pending immediate abort */
-
-        char *remote_base = get_segment_base(i);
-        uint8_t remote_version = get_segment(i)->transaction_read_version;
-
-        LIST_FOREACH_R(
-            STM_PSEGMENT->modified_old_objects,
-            object_t * /*item*/,
-            ({
-                if (was_read_remote(remote_base, item, remote_version)) {
-                    /* A write-read conflict! */
-                    dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n",
-                             item, STM_SEGMENT->segment_num, i));
-                    if (write_read_contention_management(i, item)) {
-                        /* If we reach this point, we didn't abort, but we
-                           had to wait for the other thread to commit.  If we
-                           did, then we have to restart committing from our call
-                           to synchronize_all_threads(). */
-                        return true;
-                    }
-                    /* we aborted the other transaction without waiting, so
-                       we can just break out of this loop on
-                       modified_old_objects and continue with the next
-                       segment */
-                    break;
-                }
-            }));
-    }
-
-    return false;
-}
-
-static void copy_object_to_shared(object_t *obj, int source_segment_num)
-{
-    /* Only used by major GC.  XXX There is a lot of code duplication
-       with synchronize_object_now() but I don't completely see how to
-       improve...
-    */
-    assert(!_is_young(obj));
-
-    char *segment_base = get_segment_base(source_segment_num);
-    uintptr_t start = (uintptr_t)obj;
-    uintptr_t first_page = start / 4096UL;
-    struct object_s *realobj = (struct object_s *)
-        REAL_ADDRESS(segment_base, obj);
-
-    if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) {
-        abort();//XXX WRITE THE FAST CASE
-    }
-    else {
-        ssize_t obj_size = stmcb_size_rounded_up(realobj);
-        assert(obj_size >= 16);
-        uintptr_t end = start + obj_size;
-        uintptr_t last_page = (end - 1) / 4096UL;
-
-        for (; first_page <= last_page; first_page++) {
-
-            /* Copy the object into the shared page, if needed */
-            if (is_private_page(source_segment_num, first_page)) {
-
-                uintptr_t copy_size;
-                if (first_page == last_page) {
-                    /* this is the final fragment */
-                    copy_size = end - start;
-                }
-                else {
-                    /* this is a non-final fragment, going up to the
-                       page's end */
-                    copy_size = 4096 - (start & 4095);
-                }
-                /* double-check that the result fits in one page */
-                assert(copy_size > 0);
-                assert(copy_size + (start & 4095) <= 4096);
-
-                char *src = REAL_ADDRESS(segment_base, start);
-                char *dst = REAL_ADDRESS(stm_object_pages, start);
-                if (copy_size == 4096)
-                    pagecopy(dst, src);
-                else
-                    memcpy(dst, src, copy_size);
-            }
-
-            start = (start + 4096) & ~4095;
-        }
-    }
-}
-
-static void _page_wise_synchronize_object_now(object_t *obj)
-{
-    uintptr_t start = (uintptr_t)obj;
-    uintptr_t first_page = start / 4096UL;
-
-    char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-    ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj);
-    assert(obj_size >= 16);
-    uintptr_t end = start + obj_size;
-    uintptr_t last_page = (end - 1) / 4096UL;
-    long i, myself = STM_SEGMENT->segment_num;
-
-    for (; first_page <= last_page; first_page++) {
-
-        uintptr_t copy_size;
-        if (first_page == last_page) {
-            /* this is the final fragment */
-            copy_size = end - start;
-        }
-        else {
-            /* this is a non-final fragment, going up to the
-               page's end */
-            copy_size = 4096 - (start & 4095);
-        }
-        /* double-check that the result fits in one page */
-        assert(copy_size > 0);
-        assert(copy_size + (start & 4095) <= 4096);
-
-        /* First copy the object into the shared page, if needed */
-        char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start);
-        char *dst = REAL_ADDRESS(stm_object_pages, start);
-        if (is_private_page(myself, first_page)) {
-            if (copy_size == 4096)
-                pagecopy(dst, src);
-            else
-                memcpy(dst, src, copy_size);
-        }
-        else {
-            assert(memcmp(dst, src, copy_size) == 0);  /* same page */
-        }
-
-        for (i = 1; i <= NB_SEGMENTS; i++) {
-            if (i == myself)
-                continue;
-
-            /* src = REAL_ADDRESS(stm_object_pages, start); */
-            dst = REAL_ADDRESS(get_segment_base(i), start);
-            if (is_private_page(i, first_page)) {
-                /* The page is a private page.  We need to diffuse this
-                   fragment of object from the shared page to this private
-                   page. */
-                if (copy_size == 4096)
-                    pagecopy(dst, src);
-                else
-                    memcpy(dst, src, copy_size);
-            }
-            else {
-                assert(!memcmp(dst, src, copy_size));  /* same page */
-            }
-        }
-
-        start = (start + 4096) & ~4095;
-    }
-}
-
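The fragment arithmetic in _page_wise_synchronize_object_now() splits
an object into page-bounded chunks.  Here is a self-contained
demonstration of the same splitting logic (illustrative only; the
formulas are exactly those of the function above):

    #include <stdint.h>
    #include <stdio.h>

    static void show_fragments(uintptr_t start, size_t size)
    {
        uintptr_t end = start + size;
        uintptr_t first_page = start / 4096UL;
        uintptr_t last_page = (end - 1) / 4096UL;
        for (; first_page <= last_page; first_page++) {
            uintptr_t copy_size;
            if (first_page == last_page)
                copy_size = end - start;            /* final fragment */
            else
                copy_size = 4096 - (start & 4095);  /* up to page end */
            printf("page %lu: %lu bytes\n",
                   (unsigned long)first_page, (unsigned long)copy_size);
            start = (start + 4096) & ~4095UL;       /* next page start */
        }
    }

    int main(void)
    {
        show_fragments(4000, 5000);   /* prints 96, 4096 and 808 bytes */
        return 0;
    }
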
-static inline bool _has_private_page_in_range(
-    long seg_num, uintptr_t start, uintptr_t size)
-{
-    uintptr_t first_page = start / 4096UL;
-    uintptr_t last_page = (start + size) / 4096UL;
-    for (; first_page <= last_page; first_page++)
-        if (is_private_page(seg_num, first_page))
-            return true;
-    return false;
-}
-
-static void _card_wise_synchronize_object_now(object_t *obj)
-{
-    assert(obj_should_use_cards(obj));
-    assert(!(obj->stm_flags & GCFLAG_CARDS_SET));
-    assert(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj));
-
-    uintptr_t offset_itemsize[2];
-    struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-    size_t obj_size = stmcb_size_rounded_up(realobj);
-    assert(obj_size >= 32);
-    stmcb_get_card_base_itemsize(realobj, offset_itemsize);
-    size_t real_idx_count = (obj_size - offset_itemsize[0]) / offset_itemsize[1];
-
-    uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj);
-    uintptr_t card_index = 1;
-    uintptr_t last_card_index = get_index_to_card_index(real_idx_count - 1); /* max valid index */
-    long i, myself = STM_SEGMENT->segment_num;
-
-    /* simple heuristic to check if probably the whole object is
-       marked anyway so we should do page-wise synchronize */
-    if (write_locks[first_card_index + 1] == CARD_MARKED_OLD
-        && write_locks[first_card_index + last_card_index] == CARD_MARKED_OLD
-        && write_locks[first_card_index + (last_card_index >> 1) + 1] == 
CARD_MARKED_OLD) {
-
-        dprintf(("card_wise_sync assumes %p,size:%lu is fully marked\n", obj, 
obj_size));
-        _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num),
-                            obj, CARD_CLEAR, false);
-        _page_wise_synchronize_object_now(obj);
-        return;
-    }
-
-    dprintf(("card_wise_sync syncs %p,size:%lu card-wise\n", obj, obj_size));
-
-    /* Combine multiple marked cards and do a memcpy for them. We don't
-       try yet to use page_copy() or otherwise take into account privatization
-       of pages (except _has_private_page_in_range) */
-    bool all_cards_were_cleared = true;
-
-    uintptr_t start_card_index = -1;
-    while (card_index <= last_card_index) {
-        uintptr_t card_lock_idx = first_card_index + card_index;
-        uint8_t card_value = write_locks[card_lock_idx];
-
-        if (card_value == CARD_MARKED_OLD) {
-            write_locks[card_lock_idx] = CARD_CLEAR;
-
-            if (start_card_index == -1) {   /* first marked card */
-                start_card_index = card_index;
-                /* start = (uintptr_t)obj + stmcb_index_to_byte_offset( */
-                /*     realobj, get_card_index_to_index(card_index)); */
-                if (all_cards_were_cleared) {
-                    all_cards_were_cleared = false;
-                }
-            }
-        }
-        else {
-            OPT_ASSERT(card_value == CARD_CLEAR);
-        }
-
-        if (start_card_index != -1                    /* something to copy */
-            && (card_value != CARD_MARKED_OLD         /* found non-marked card 
*/
-                || card_index == last_card_index)) {  /* this is the last card 
*/
-            /* do the copying: */
-            uintptr_t start, copy_size;
-            uintptr_t next_card_offset;
-            uintptr_t start_card_offset;
-            uintptr_t next_card_index = card_index;
-
-            if (card_value == CARD_MARKED_OLD) {
-                /* card_index is the last card of the object, but we need
-                   to go one further to get the right offset */
-                next_card_index++;
-            }
-
-            start_card_offset = offset_itemsize[0] +
-                get_card_index_to_index(start_card_index) * offset_itemsize[1];
-
-            next_card_offset = offset_itemsize[0] +
-                get_card_index_to_index(next_card_index) * offset_itemsize[1];
-
-            if (next_card_offset > obj_size)
-                next_card_offset = obj_size;
-
-            start = (uintptr_t)obj + start_card_offset;
-            copy_size = next_card_offset - start_card_offset;
-            OPT_ASSERT(copy_size > 0);
-
-            /* dprintf(("copy %lu bytes\n", copy_size)); */
-
-            /* since we have marked cards, at least one page here must be private */
-            assert(_has_private_page_in_range(myself, start, copy_size));
-
-            /* copy to shared segment: */
-            char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start);
-            char *dst = REAL_ADDRESS(stm_object_pages, start);
-            memcpy(dst, src, copy_size);
-
-            /* copy to other segments */
-            for (i = 1; i <= NB_SEGMENTS; i++) {
-                if (i == myself)
-                    continue;
-                if (!_has_private_page_in_range(i, start, copy_size))
-                    continue;
-                /* src = REAL_ADDRESS(stm_object_pages, start); */
-                dst = REAL_ADDRESS(get_segment_base(i), start);
-                memcpy(dst, src, copy_size);
-            }
-
-            start_card_index = -1;
-        }
-
-        card_index++;
-    }
-
-    if (all_cards_were_cleared) {
-        /* well, seems like we never called stm_write_card() on it, so actually
-           we need to fall back to synchronize the whole object */
-        _page_wise_synchronize_object_now(obj);
-        return;
-    }
-
-#ifndef NDEBUG
-    char *src = REAL_ADDRESS(stm_object_pages, (uintptr_t)obj);
-    char *dst;
-    for (i = 1; i <= NB_SEGMENTS; i++) {
-        dst = REAL_ADDRESS(get_segment_base(i), (uintptr_t)obj);
-        assert(memcmp(dst, src, obj_size) == 0);
-    }
-#endif
-}
-
-
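For reference, the card-index arithmetic used above assumes the usual
c7 mapping in which CARD_SIZE array items share one card, and card
index 0 denotes the object's own write lock, so real cards start at 1.
A hedged sketch of the definitions assumed here (names and the value
of CARD_SIZE follow the c7 sources; they are not visible in this diff):

    #define CARD_SIZE  32   /* assumption: _STM_CARD_SIZE */

    /* array index -> index of the card covering it */
    static inline uintptr_t get_index_to_card_index(uintptr_t index)
    {
        return (index / CARD_SIZE) + 1;
    }

    /* card index -> first array index covered by that card */
    static inline uintptr_t get_card_index_to_index(uintptr_t card_index)
    {
        return (card_index - 1) * CARD_SIZE;
    }
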
-static void synchronize_object_now(object_t *obj, bool ignore_cards)
-{
-    /* Copy around the version of 'obj' that lives in our own segment.
-       It is first copied into the shared pages, and then into other
-       segments' own private pages.
-
-       Must be called with the privatization lock acquired.
-    */
-    assert(!_is_young(obj));
-    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
-    assert(STM_PSEGMENT->privatization_lock == 1);
-
-    if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) {
-        assert(!(obj->stm_flags & GCFLAG_CARDS_SET));
-        abort();//XXX WRITE THE FAST CASE
-    } else if (ignore_cards || !obj_should_use_cards(obj)) {
-        _page_wise_synchronize_object_now(obj);
-    } else {
-        _card_wise_synchronize_object_now(obj);
-    }
-
-    _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), obj);
-}
-
-static void push_overflow_objects_from_privatized_pages(void)
-{
-    if (STM_PSEGMENT->large_overflow_objects == NULL)
-        return;
-
-    acquire_privatization_lock();
-    LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *,
-                   synchronize_object_now(item, true /*ignore_cards*/));
-    release_privatization_lock();
-}
-
-static void push_modified_to_other_segments(void)
-{
-    acquire_privatization_lock();
-    LIST_FOREACH_R(
-        STM_PSEGMENT->modified_old_objects,
-        object_t * /*item*/,
-        ({
-            /* clear the write-lock (note that this runs with all other
-               threads paused, so no need to be careful about ordering) */
-            uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START;
-            assert(lock_idx < sizeof(write_locks));
-            assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num);
-            write_locks[lock_idx] = 0;
-
-            /* the WRITE_BARRIER flag should have been set again by
-               minor_collection() */
-            assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0);
-
-            /* copy the object to the shared page, and to the other
-               private pages as needed */
-            synchronize_object_now(item, false); /* don't ignore_cards */
-        }));
-    release_privatization_lock();
-
-    list_clear(STM_PSEGMENT->modified_old_objects);
-    list_clear(STM_PSEGMENT->modified_old_objects_markers);
-}
-
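The lock_idx formula above encodes c7's write-lock addressing: one
lock byte per 16 bytes of object address space, with the table offset
by WRITELOCK_START.  As a small sketch (same formula as in the code
above; sketch_write_lock_for is a hypothetical helper, with
write_locks and WRITELOCK_START as defined elsewhere in core.c):

    static inline uint8_t *sketch_write_lock_for(object_t *obj)
    {
        uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START;
        assert(lock_idx < sizeof(write_locks));
        return &write_locks[lock_idx];
    }
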
-static void _finish_transaction(enum stm_event_e event)
-{
     STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
     STM_PSEGMENT->transaction_state = TS_NONE;
-
-    /* marker_inev is not needed anymore */
-    STM_PSEGMENT->marker_inev.object = NULL;
-
-    /* reset these lists to NULL for the next transaction */
-    _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
-    LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery);
-    list_clear(STM_PSEGMENT->old_objects_with_cards);
-    LIST_FREE(STM_PSEGMENT->large_overflow_objects);
-
-    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
-    timing_event(tl, event);
+    list_clear(STM_PSEGMENT->objects_pointing_to_nursery);
+    list_clear(STM_PSEGMENT->new_objects);
 
     release_thread_segment(tl);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 }
 
+static void check_all_write_barrier_flags(char *segbase, struct list_s *list)
+{
+#ifndef NDEBUG
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        struct object_s *dst = (struct object_s*)REAL_ADDRESS(segbase, obj);
+        assert(dst->stm_flags & GCFLAG_WRITE_BARRIER);
+        assert(!(dst->stm_flags & GCFLAG_WB_EXECUTED));
+    }
+#endif
+}
+
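check_all_write_barrier_flags() and reset_modified_from_backup_copies()
below both walk modified_old_objects as a flat array of undo records.
The record layout itself is outside this excerpt; presumably (following
the c8 sources) it is along these lines, with SLICE_OFFSET/SLICE_SIZE
unpacking a packed offset-and-size word:

    struct stm_undo_s {
        object_t *object;   /* the object that is modified */
        char *backup;       /* copy of a slice of the original object */
        uint64_t slice;     /* packed offset and size of that slice */
    };
    #define SLICE_OFFSET(slice)  ((slice) >> 16)
    #define SLICE_SIZE(slice)    ((int)((slice) & 0xFFFF))
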
+static void push_new_objects_to_other_segments(void)
+{
+    acquire_privatization_lock(STM_SEGMENT->segment_num);
+    LIST_FOREACH_R(STM_PSEGMENT->new_objects, object_t *,
+        ({
+            assert(item->stm_flags & GCFLAG_WB_EXECUTED);
+            item->stm_flags &= ~GCFLAG_WB_EXECUTED;
+            synchronize_object_enqueue(item);
+        }));
+    synchronize_objects_flush();
+    release_privatization_lock(STM_SEGMENT->segment_num);
+
+    /* We may as well clear the list here: the objects are only
+       useful if the commit succeeds, and we never do a major
+       collection in-between.  They should also survive any page
+       privatization happening before the actual commit, since
+       handle_segfault_in_page() always does a pagecopy that also
+       copies unknown-to-the-segment/uncommitted things.
+    */
+    list_clear(STM_PSEGMENT->new_objects);
+}
+
+
 void stm_commit_transaction(void)
 {
- restart_all:
-    exec_local_finalizers();
-
     assert(!_has_mutex());
     assert(STM_PSEGMENT->safe_point == SP_RUNNING);
     assert(STM_PSEGMENT->running_pthread == pthread_self());
 
-    minor_collection(/*commit=*/ true);
+    dprintf(("> stm_commit_transaction()\n"));
+    minor_collection(1);
 
-    /* synchronize overflow objects living in privatized pages */
-    push_overflow_objects_from_privatized_pages();
-
-    s_mutex_lock();
-
- restart:
-    /* force all other threads to be paused.  They will unpause
-       automatically when we are done here, i.e. at mutex_unlock().
-       Important: we should not call cond_wait() in the meantime. */
-    synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
-
-    if (any_local_finalizers()) {
-        s_mutex_unlock();
-        goto restart_all;
-    }
-
-    /* detect conflicts */
-    if (detect_write_read_conflicts())
-        goto restart;
-
-    /* cannot abort any more from here */
-    dprintf(("commit_transaction\n"));
-
-    assert(STM_SEGMENT->nursery_end == NURSERY_END);
-    stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
-
-    /* if a major collection is required, do it here */
-    if (is_major_collection_requested()) {
-        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
-        major_collection_now_at_safe_point();
-        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
-    }
-
-    /* synchronize modified old objects to other threads */
-    push_modified_to_other_segments();
-    _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
-
-    commit_finalizers();
-
-    /* update 'overflow_number' if needed */
-    if (STM_PSEGMENT->overflow_number_has_been_used) {
-        highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0;
-        assert(highest_overflow_number !=        /* XXX else, overflow! */
-               (uint32_t)-GCFLAG_OVERFLOW_NUMBER_bit0);
-        STM_PSEGMENT->overflow_number = highest_overflow_number;
-        STM_PSEGMENT->overflow_number_has_been_used = false;
-    }
+    push_new_objects_to_other_segments();
+    /* push before validating; otherwise the new objects would be reachable too early */
+    _validate_and_add_to_commit_log();
 
     invoke_and_clear_user_callbacks(0);   /* for commit */
 
-    /* send what is hopefully the correct signals */
-    if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
-        /* wake up one thread in wait_for_end_of_inevitable_transaction() */
-        cond_signal(C_INEVITABLE);
-        if (globally_unique_transaction)
-            committed_globally_unique_transaction();
+    /* XXX do we still need an s_mutex_lock() section here? */
+    s_mutex_lock();
+    enter_safe_point_if_requested();
+    assert(STM_SEGMENT->nursery_end == NURSERY_END);
+
+    stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
+
+    if (globally_unique_transaction && STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
+        committed_globally_unique_transaction();
     }
 
     /* done */
-    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
-    _finish_transaction(STM_TRANSACTION_COMMIT);
+    _finish_transaction();
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 
     s_mutex_unlock();
-
-    invoke_general_finalizers(tl);
 }
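For orientation, the removed body of stm_start_inevitable_transaction()
above shows the caller-side pattern for these entry points.  A hedged
usage sketch (API per stmgc.h; run_transaction is a hypothetical
helper, and on abort stm_start_transaction() is re-entered via the
rewind_jmp machinery):

    void run_transaction(stm_thread_local_t *tl, void (*body)(void))
    {
        rewind_jmp_buf rjbuf;
        stm_rewind_jmp_enterframe(tl, &rjbuf);
        stm_start_transaction(tl);   /* aborts longjmp back in here */
        body();                      /* user code, possibly retried */
        stm_commit_transaction();
        stm_rewind_jmp_leaveframe(tl, &rjbuf);
    }
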
 
-void stm_abort_transaction(void)
+static void reset_modified_from_backup_copies(int segment_num)
 {
-    s_mutex_lock();
-    abort_with_mutex();
-}
+#pragma push_macro("STM_PSEGMENT")
+#pragma push_macro("STM_SEGMENT")
+#undef STM_PSEGMENT
+#undef STM_SEGMENT
+    assert(get_priv_segment(segment_num)->modification_lock);
 
-static void
-reset_modified_from_other_segments(int segment_num)
-{
-    /* pull the right versions from segment 0 in order
-       to reset our pages as part of an abort.
+    struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num);
+    struct list_s *list = pseg->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
 
-       Note that this function is also sometimes called from
-       contention.c to clean up the state of a different thread,
-       when we would really like it to be aborted now and it is
-       suspended at a safe-point.
-    */
-    struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num);
-    char *local_base = get_segment_base(segment_num);
-    char *remote_base = get_segment_base(0);
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        char *dst = REAL_ADDRESS(pseg->pub.segment_base, obj);
 
-    LIST_FOREACH_R(
-        pseg->modified_old_objects,
-        object_t * /*item*/,
-        ({
-            /* memcpy in the opposite direction than
-               push_modified_to_other_segments() */
-            char *src = REAL_ADDRESS(remote_base, item);
-            char *dst = REAL_ADDRESS(local_base, item);
-            ssize_t size = stmcb_size_rounded_up((struct object_s *)src);
-            memcpy(dst, src, size);
+        memcpy(dst + SLICE_OFFSET(undo->slice),
+               undo->backup,
+               SLICE_SIZE(undo->slice));
 
-            if (obj_should_use_cards(item))
-                _reset_object_cards(pseg, item, CARD_CLEAR, false);
+        dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu 
bk=%p\n",
+                 segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup));
 
-            /* objects in 'modified_old_objects' usually have the
-               WRITE_BARRIER flag, unless they have been modified
-               recently.  Ignore the old flag; after copying from the
-               other segment, we should have the flag. */
-            assert(((struct object_s *)dst)->stm_flags & GCFLAG_WRITE_BARRIER);
+        free(undo->backup);
+    }
 
-            /* write all changes to the object before we release the
-               write lock below.  This is needed because we need to
-               ensure that if the write lock is not set, another thread
-               can get it and then change 'src' in parallel.  The
-               write_fence() ensures in particular that 'src' has been
-               fully read before we release the lock: reading it
-               is necessary to write 'dst'. */
-            write_fence();
+    /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */
+    check_all_write_barrier_flags(pseg->pub.segment_base, list);
 
-            /* clear the write-lock */
-            uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START;