Author: Remi Meier <[email protected]>
Branch: c8-private-pages
Changeset: r1546:82528c2b6af6
Date: 2015-01-19 15:09 +0100
http://bitbucket.org/pypy/stmgc/changeset/82528c2b6af6/

Log:    Merge with default

diff too long, truncating to 2000 out of 3250 lines

diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c
--- a/c7/demo/demo2.c
+++ b/c7/demo/demo2.c
@@ -3,6 +3,7 @@
 #include <assert.h>
 #include <pthread.h>
 #include <semaphore.h>
+#include <time.h>
 
 #ifdef USE_HTM
 #  include "../../htm-c7/stmgc.h"
@@ -59,12 +60,25 @@
 }
 void stmcb_commit_soon() {}
 
-static void expand_marker(char *base, uintptr_t odd_number,
-                          object_t *following_object,
-                          char *outputbuf, size_t outputbufsize)
+static void timing_event(stm_thread_local_t *tl, /* the local thread */
+                         enum stm_event_e event,
+                         stm_loc_marker_t *markers)
 {
-    assert(following_object == NULL);
-    snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number);
+    static char *event_names[] = { STM_EVENT_NAMES };
+
+    char buf[1024], *p;
+    struct timespec tp;
+    clock_gettime(CLOCK_MONOTONIC, &tp);
+
+    p = buf;
+    p += sprintf(p, "{%.9f} %p %s", tp.tv_sec + 0.000000001 * tp.tv_nsec,
+                 tl, event_names[event]);
+    if (markers != NULL) {
+        p += sprintf(p, ", markers: %lu, %lu",
+                     markers[0].odd_number, markers[1].odd_number);
+    }
+    sprintf(p, "\n");
+    fputs(buf, stderr);
 }
 
 
@@ -108,18 +122,6 @@
 
     stm_start_transaction(&stm_thread_local);
 
-    if (stm_thread_local.longest_marker_state != 0) {
-        fprintf(stderr, "[%p] marker %d for %.6f seconds:\n",
-                &stm_thread_local,
-                stm_thread_local.longest_marker_state,
-                stm_thread_local.longest_marker_time);
-        fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n",
-                stm_thread_local.longest_marker_self,
-                stm_thread_local.longest_marker_other);
-        stm_thread_local.longest_marker_state = 0;
-        stm_thread_local.longest_marker_time = 0.0;
-    }
-
     nodeptr_t prev = initial;
     stm_read((objptr_t)prev);
 
@@ -223,7 +225,6 @@
 
 void unregister_thread_local(void)
 {
-    stm_flush_timing(&stm_thread_local, 1);
     stm_unregister_thread_local(&stm_thread_local);
 }
 
@@ -295,9 +296,15 @@
 
     stm_setup();
     stm_register_thread_local(&stm_thread_local);
+
+    /* check that we can use stm_start_inevitable_transaction() without
+       any rjbuf on the stack */
+    stm_start_inevitable_transaction(&stm_thread_local);
+    stm_commit_transaction();
+
+
     stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
-    stmcb_expand_marker = expand_marker;
-
+    stmcb_timing_event = timing_event;
 
     setup_list();
 
diff --git a/c7/stm/contention.c b/c7/stm/contention.c
--- a/c7/stm/contention.c
+++ b/c7/stm/contention.c
@@ -3,34 +3,50 @@
 #endif
 
 
-enum contention_kind_e {
+/* Here are the possible kinds of contention:
 
-    /* A write-write contention occurs when we running our transaction
-       and detect that we are about to write to an object that another
-       thread is also writing to.  This kind of contention must be
-       resolved before continuing.  This *must* abort one of the two
-       threads: the caller's thread is not at a safe-point, so cannot
-       wait! */
-    WRITE_WRITE_CONTENTION,
+   STM_CONTENTION_WRITE_WRITE
 
-    /* A write-read contention occurs when we are trying to commit: it
+       A write-write contention occurs when we are running our
+       transaction and detect that we are about to write to an object
+       that another thread is also writing to.  This kind of
+       contention must be resolved before continuing.  This *must*
+       abort one of the two threads: the caller's thread is not at a
+       safe-point, so cannot wait!
+
+       It is reported as a timing event with the following two markers:
+       the current thread (i.e. where the second-in-time write occurs);
+       and the other thread (from its 'modified_old_objects_markers',
+       where the first-in-time write occurred).
+
+   STM_CONTENTION_WRITE_READ
+
+       A write-read contention occurs when we are trying to commit: it
        means that an object we wrote to was also read by another
        transaction.  Even though it would seem obvious that we should
        just abort the other thread and proceed in our commit, a more
        subtle answer would be in some cases to wait for the other thread
        to commit first.  It would commit having read the old value, and
-       then we can commit our change to it. */
-    WRITE_READ_CONTENTION,
+       then we can commit our change to it.
 
-    /* An inevitable contention occurs when we're trying to become
+       It is reported as a timing event with only one marker: the
+       older location of the write that was done by the current thread.
+
+    STM_CONTENTION_INEVITABLE
+
+       An inevitable contention occurs when we're trying to become
        inevitable but another thread already is.  We can never abort the
        other thread in this case, but we still have the choice to abort
-       ourselves or pause until the other thread commits. */
-    INEVITABLE_CONTENTION,
-};
+       ourselves or pause until the other thread commits.
+
+       It is reported with two markers, one for the current thread and
+       one for the other thread.  Each marker gives the location that
+       attempts to make the transaction inevitable.
+*/
+
 
 struct contmgr_s {
-    enum contention_kind_e kind;
+    enum stm_event_e kind;
     struct stm_priv_segment_info_s *other_pseg;
     bool abort_other;
     bool try_sleep;  // XXX add a way to timeout, but should handle repeated
@@ -99,7 +115,7 @@
 
 
 static bool contention_management(uint8_t other_segment_num,
-                                  enum contention_kind_e kind,
+                                  enum stm_event_e kind,
                                   object_t *obj)
 {
     assert(_has_mutex());
@@ -109,6 +125,9 @@
     if (must_abort())
         abort_with_mutex();
 
+    /* Report the contention */
+    timing_contention(kind, other_segment_num, obj);
+
     /* Who should abort here: this thread, or the other thread? */
     struct contmgr_s contmgr;
     contmgr.kind = kind;
@@ -138,20 +157,9 @@
         contmgr.abort_other = false;
     }
 
-
-    int wait_category =
-        kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ :
-        kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE :
-        STM_TIME_WAIT_OTHER;
-
-    int abort_category =
-        kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE :
-        kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ :
-        kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE :
-        STM_TIME_RUN_ABORTED_OTHER;
-
-
-    if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION &&
+    /* Do one of three things here...
+     */
+    if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE &&
         contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) {
         others_may_have_run = true;
         /* Sleep.
@@ -164,30 +172,24 @@
              itself already paused here.
         */
         contmgr.other_pseg->signal_when_done = true;
-        marker_contention(kind, false, other_segment_num, obj);
-
-        change_timing_state(wait_category);
 
         /* tell the other to commit ASAP */
         signal_other_to_commit_soon(contmgr.other_pseg);
 
         dprintf(("pausing...\n"));
+
+        timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION);
+
         cond_signal(C_AT_SAFE_POINT);
         STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE;
         cond_wait(C_TRANSACTION_DONE);
         STM_PSEGMENT->safe_point = SP_RUNNING;
         dprintf(("pausing done\n"));
 
+        timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
+
         if (must_abort())
             abort_with_mutex();
-
-        struct stm_priv_segment_info_s *pseg =
-            get_priv_segment(STM_SEGMENT->segment_num);
-        double elapsed =
-            change_timing_state_tl(pseg->pub.running_thread,
-                                   STM_TIME_RUN_CURRENT);
-        marker_copy(pseg->pub.running_thread, pseg,
-                    wait_category, elapsed);
     }
 
     else if (!contmgr.abort_other) {
@@ -195,16 +197,16 @@
         signal_other_to_commit_soon(contmgr.other_pseg);
 
         dprintf(("abort in contention: kind %d\n", kind));
-        STM_SEGMENT->nursery_end = abort_category;
-        marker_contention(kind, false, other_segment_num, obj);
         abort_with_mutex();
     }
 
     else {
         /* We have to signal the other thread to abort, and wait until
            it does. */
-        contmgr.other_pseg->pub.nursery_end = abort_category;
-        marker_contention(kind, true, other_segment_num, obj);
+        contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT;
+
+        timing_event(STM_SEGMENT->running_thread,
+                     STM_ABORTING_OTHER_CONTENTION);
 
         int sp = contmgr.other_pseg->safe_point;
         switch (sp) {
@@ -296,7 +298,8 @@
         assert(get_priv_segment(other_segment_num)->write_lock_num ==
                prev_owner);
 
-        contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj);
+        contention_management(other_segment_num,
+                              STM_CONTENTION_WRITE_WRITE, obj);
 
         /* now we return into _stm_write_slowpath() and will try again
            to acquire the write lock on our object. */
@@ -308,10 +311,12 @@
 static bool write_read_contention_management(uint8_t other_segment_num,
                                              object_t *obj)
 {
-    return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj);
+    return contention_management(other_segment_num,
+                                 STM_CONTENTION_WRITE_READ, obj);
 }
 
 static void inevitable_contention_management(uint8_t other_segment_num)
 {
-    contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL);
+    contention_management(other_segment_num,
+                          STM_CONTENTION_INEVITABLE, NULL);
 }
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -124,17 +124,13 @@
 
         dprintf_test(("write_slowpath %p -> mod_old\n", obj));
 
-        /* First change to this old object from this transaction.
+        /* Add the current marker, recording where we wrote to this object */
+        timing_record_write();
+
+        /* Change to this old object from this transaction.
            Add it to the list 'modified_old_objects'. */
         LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj);
 
-        /* Add the current marker, recording where we wrote to this object */
-        uintptr_t marker[2];
-        marker_fetch(STM_SEGMENT->running_thread, marker);
-        STM_PSEGMENT->modified_old_objects_markers =
-            list_append2(STM_PSEGMENT->modified_old_objects_markers,
-                         marker[0], marker[1]);
-
         release_marker_lock(STM_SEGMENT->segment_base);
 
         /* We need to privatize the pages containing the object, if they
@@ -328,29 +324,24 @@
     STM_SEGMENT->transaction_read_version = 1;
 }
 
-static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable)
+static uint64_t _global_start_time = 0;
+
+static void _stm_start_transaction(stm_thread_local_t *tl)
 {
     assert(!_stm_in_transaction(tl));
 
-  retry:
-    if (inevitable) {
-        wait_for_end_of_inevitable_transaction(tl);
-    }
-
-    if (!acquire_thread_segment(tl))
-        goto retry;
+    while (!acquire_thread_segment(tl))
+        ;
     /* GS invalid before this point! */
 
     assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION);
     assert(STM_PSEGMENT->transaction_state == TS_NONE);
-    change_timing_state(STM_TIME_RUN_CURRENT);
-    STM_PSEGMENT->start_time = tl->_timing_cur_start;
+    timing_event(tl, STM_TRANSACTION_START);
+    STM_PSEGMENT->start_time = _global_start_time++;
     STM_PSEGMENT->signalled_to_commit_soon = false;
     STM_PSEGMENT->safe_point = SP_RUNNING;
-    STM_PSEGMENT->marker_inev[1] = 0;
-    if (inevitable)
-        marker_fetch_inev();
-    STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR);
+    STM_PSEGMENT->marker_inev.object = NULL;
+    STM_PSEGMENT->transaction_state = TS_REGULAR;
 #ifndef NDEBUG
     STM_PSEGMENT->running_pthread = pthread_self();
 #endif
@@ -383,6 +374,7 @@
     assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
     assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL);
     assert(STM_PSEGMENT->large_overflow_objects == NULL);
+    assert(STM_PSEGMENT->finalizers == NULL);
 #ifndef NDEBUG
     /* this should not be used when objects_pointing_to_nursery == NULL */
     STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L;
@@ -399,14 +391,21 @@
 #else
     long repeat_count = stm_rewind_jmp_setjmp(tl);
 #endif
-    _stm_start_transaction(tl, false);
+    _stm_start_transaction(tl);
     return repeat_count;
 }
 
 void stm_start_inevitable_transaction(stm_thread_local_t *tl)
 {
-    s_mutex_lock();
-    _stm_start_transaction(tl, true);
+    /* used to be more efficient, starting directly an inevitable transaction,
+       but there is no real point any more, I believe */
+    rewind_jmp_buf rjbuf;
+    stm_rewind_jmp_enterframe(tl, &rjbuf);
+
+    stm_start_transaction(tl);
+    stm_become_inevitable(tl, "start_inevitable_transaction");
+
+    stm_rewind_jmp_leaveframe(tl, &rjbuf);
 }
 
 
@@ -449,7 +448,10 @@
                         return true;
                     }
                     /* we aborted the other transaction without waiting, so
-                       we can just continue */
+                       we can just break out of this loop on
+                       modified_old_objects and continue with the next
+                       segment */
+                    break;
                 }
             }));
     }
@@ -783,13 +785,13 @@
     list_clear(STM_PSEGMENT->modified_old_objects_markers);
 }
 
-static void _finish_transaction(int attribute_to)
+static void _finish_transaction(enum stm_event_e event)
 {
     STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
     STM_PSEGMENT->transaction_state = TS_NONE;
 
     /* marker_inev is not needed anymore */
-    STM_PSEGMENT->marker_inev[1] = 0;
+    STM_PSEGMENT->marker_inev.object = NULL;
 
     /* reset these lists to NULL for the next transaction */
     _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
@@ -797,24 +799,24 @@
     list_clear(STM_PSEGMENT->old_objects_with_cards);
     LIST_FREE(STM_PSEGMENT->large_overflow_objects);
 
-    timing_end_transaction(attribute_to);
+    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+    timing_event(tl, event);
 
-    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
     release_thread_segment(tl);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 }
 
 void stm_commit_transaction(void)
 {
+ restart_all:
+    exec_local_finalizers();
+
     assert(!_has_mutex());
     assert(STM_PSEGMENT->safe_point == SP_RUNNING);
     assert(STM_PSEGMENT->running_pthread == pthread_self());
 
     minor_collection(/*commit=*/ true);
 
-    /* the call to minor_collection() above leaves us with
-       STM_TIME_BOOKKEEPING */
-
     /* synchronize overflow objects living in privatized pages */
     push_overflow_objects_from_privatized_pages();
 
@@ -826,6 +828,11 @@
        Important: we should not call cond_wait() in the meantime. */
     synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
 
+    if (any_local_finalizers()) {
+        s_mutex_unlock();
+        goto restart_all;
+    }
+
     /* detect conflicts */
     if (detect_write_read_conflicts())
         goto restart;
@@ -838,15 +845,17 @@
 
     /* if a major collection is required, do it here */
     if (is_major_collection_requested()) {
-        int oldstate = change_timing_state(STM_TIME_MAJOR_GC);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
         major_collection_now_at_safe_point();
-        change_timing_state(oldstate);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
     }
 
     /* synchronize modified old objects to other threads */
     push_modified_to_other_segments();
     _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
 
+    commit_finalizers();
+
     /* update 'overflow_number' if needed */
     if (STM_PSEGMENT->overflow_number_has_been_used) {
         highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0;
@@ -867,10 +876,13 @@
     }
 
     /* done */
-    _finish_transaction(STM_TIME_RUN_COMMITTED);
+    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+    _finish_transaction(STM_TRANSACTION_COMMIT);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 
     s_mutex_unlock();
+
+    invoke_general_finalizers(tl);
 }
 
 void stm_abort_transaction(void)
@@ -960,10 +972,6 @@
                        (int)pseg->transaction_state);
     }
 
-    /* if we don't have marker information already, look up and preserve
-       the marker information from the shadowstack as a string */
-    marker_default_for_abort(pseg);
-
     /* throw away the content of the nursery */
     long bytes_in_nursery = throw_away_nursery(pseg);
 
@@ -1052,16 +1060,15 @@
     /* invoke the callbacks */
     invoke_and_clear_user_callbacks(1);   /* for abort */
 
-    int attribute_to = STM_TIME_RUN_ABORTED_OTHER;
+    abort_finalizers();
 
     if (is_abort(STM_SEGMENT->nursery_end)) {
         /* done aborting */
-        attribute_to = STM_SEGMENT->nursery_end;
         STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE
                                                    : NURSERY_END;
     }
 
-    _finish_transaction(attribute_to);
+    _finish_transaction(STM_TRANSACTION_ABORT);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 
     /* Broadcast C_ABORTED to wake up contention.c */
@@ -1103,8 +1110,8 @@
     if (STM_PSEGMENT->transaction_state == TS_REGULAR) {
         dprintf(("become_inevitable: %s\n", msg));
 
-        marker_fetch_inev();
-        wait_for_end_of_inevitable_transaction(NULL);
+        timing_fetch_inev();
+        wait_for_end_of_inevitable_transaction();
         STM_PSEGMENT->transaction_state = TS_INEVITABLE;
         stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
         invoke_and_clear_user_callbacks(0);   /* for commit */
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -138,7 +138,7 @@
 
     /* Start time: to know approximately for how long a transaction has
        been running, in contention management */
-    double start_time;
+    uint64_t start_time;
 
     /* This is the number stored in the overflowed objects (a multiple of
        GCFLAG_OVERFLOW_NUMBER_bit0).  It is incremented when the
@@ -196,10 +196,15 @@
     pthread_t running_pthread;
 #endif
 
-    /* Temporarily stores the marker information */
-    char marker_self[_STM_MARKER_LEN];
-    char marker_other[_STM_MARKER_LEN];
-    uintptr_t marker_inev[2];  /* marker where this thread became inevitable */
+    /* marker where this thread became inevitable */
+    stm_loc_marker_t marker_inev;
+
+    /* light finalizers */
+    struct list_s *young_objects_with_light_finalizers;
+    struct list_s *old_objects_with_light_finalizers;
+
+    /* regular finalizers (objs from the current transaction only) */
+    struct finalizers_s *finalizers;
 };
 
 enum /* safe_point */ {
diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c
new file mode 100644
--- /dev/null
+++ b/c7/stm/finalizer.c
@@ -0,0 +1,404 @@
+
+
+/* callbacks */
+void (*stmcb_light_finalizer)(object_t *);
+void (*stmcb_finalizer)(object_t *);
+
+
+static void init_finalizers(struct finalizers_s *f)
+{
+    f->objects_with_finalizers = list_create();
+    f->count_non_young = 0;
+    f->run_finalizers = NULL;
+    f->running_next = NULL;
+}
+
+static void setup_finalizer(void)
+{
+    init_finalizers(&g_finalizers);
+}
+
+static void teardown_finalizer(void)
+{
+    if (g_finalizers.run_finalizers != NULL)
+        list_free(g_finalizers.run_finalizers);
+    list_free(g_finalizers.objects_with_finalizers);
+    memset(&g_finalizers, 0, sizeof(g_finalizers));
+}
+
+static void _commit_finalizers(void)
+{
+    if (STM_PSEGMENT->finalizers->run_finalizers != NULL) {
+        /* copy 'STM_PSEGMENT->finalizers->run_finalizers' into
+           'g_finalizers.run_finalizers', dropping any initial NULLs
+           (finalizers already called) */
+        struct list_s *src = STM_PSEGMENT->finalizers->run_finalizers;
+        uintptr_t frm = 0;
+        if (STM_PSEGMENT->finalizers->running_next != NULL) {
+            frm = *STM_PSEGMENT->finalizers->running_next;
+            assert(frm <= list_count(src));
+            *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1;
+        }
+        if (frm < list_count(src)) {
+            g_finalizers.run_finalizers = list_extend(
+                g_finalizers.run_finalizers,
+                src, frm);
+        }
+        list_free(src);
+    }
+
+    /* copy the whole 'STM_PSEGMENT->finalizers->objects_with_finalizers'
+       into 'g_finalizers.objects_with_finalizers' */
+    g_finalizers.objects_with_finalizers = list_extend(
+        g_finalizers.objects_with_finalizers,
+        STM_PSEGMENT->finalizers->objects_with_finalizers, 0);
+    list_free(STM_PSEGMENT->finalizers->objects_with_finalizers);
+
+    free(STM_PSEGMENT->finalizers);
+    STM_PSEGMENT->finalizers = NULL;
+}
+
+static void _abort_finalizers(void)
+{
+    /* like _commit_finalizers(), but forget everything from the
+       current transaction */
+    if (STM_PSEGMENT->finalizers->run_finalizers != NULL) {
+        if (STM_PSEGMENT->finalizers->running_next != NULL) {
+            *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1;
+        }
+        list_free(STM_PSEGMENT->finalizers->run_finalizers);
+    }
+    list_free(STM_PSEGMENT->finalizers->objects_with_finalizers);
+    free(STM_PSEGMENT->finalizers);
+    STM_PSEGMENT->finalizers = NULL;
+}
+
+
+void stm_enable_light_finalizer(object_t *obj)
+{
+    if (_is_young(obj))
+        LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj);
+    else
+        LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj);
+}
+
+object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up)
+{
+    object_t *obj = _stm_allocate_external(size_rounded_up);
+
+    if (STM_PSEGMENT->finalizers == NULL) {
+        struct finalizers_s *f = malloc(sizeof(struct finalizers_s));
+        if (f == NULL)
+            stm_fatalerror("out of memory in create_finalizers");   /* XXX */
+        init_finalizers(f);
+        STM_PSEGMENT->finalizers = f;
+    }
+    LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj);
+    return obj;
+}
+
+
+/************************************************************/
+/*  Light finalizers
+*/
+
+static void deal_with_young_objects_with_finalizers(void)
+{
+    /* for light finalizers */
+    struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers;
+    long i, count = list_count(lst);
+    for (i = 0; i < count; i++) {
+        object_t* obj = (object_t *)list_item(lst, i);
+        assert(_is_young(obj));
+
+        object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj;
+        if (pforwarded_array[0] != GCWORD_MOVED) {
+            /* not moved: the object dies */
+            stmcb_light_finalizer(obj);
+        }
+        else {
+            obj = pforwarded_array[1]; /* moved location */
+            assert(!_is_young(obj));
+            LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj);
+        }
+    }
+    list_clear(lst);
+}
+
+static void deal_with_old_objects_with_finalizers(void)
+{
+    /* for light finalizers */
+    int old_gs_register = STM_SEGMENT->segment_num;
+    int current_gs_register = old_gs_register;
+    long j;
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+
+        struct list_s *lst = pseg->old_objects_with_light_finalizers;
+        long i, count = list_count(lst);
+        lst->count = 0;
+        for (i = 0; i < count; i++) {
+            object_t* obj = (object_t *)list_item(lst, i);
+            if (!mark_visited_test(obj)) {
+                /* not marked: object dies */
+                /* we're calling the light finalizer in the same
+                   segment as where it was originally registered.  For
+                   objects that existed since a long time, it doesn't
+                   change anything: any thread should see the same old
+                   content (because if it wasn't the case, the object
+                   would be in a 'modified_old_objects' list
+                   somewhere, and so it wouldn't be dead).  But it's
+                   important if the object was created by the same
+                   transaction: then only that segment sees valid
+                   content.
+                */
+                if (j != current_gs_register) {
+                    set_gs_register(get_segment_base(j));
+                    current_gs_register = j;
+                }
+                stmcb_light_finalizer(obj);
+            }
+            else {
+                /* object survives */
+                list_set_item(lst, lst->count++, (uintptr_t)obj);
+            }
+        }
+    }
+    if (old_gs_register != current_gs_register)
+        set_gs_register(get_segment_base(old_gs_register));
+}
+
+
+/************************************************************/
+/*  Algorithm for regular (non-light) finalizers.
+    Follows closely pypy/doc/discussion/finalizer-order.rst
+    as well as rpython/memory/gc/minimark.py.
+*/
+
+static inline int _finalization_state(object_t *obj)
+{
+    /* Returns the state, "0", 1, 2 or 3, as per finalizer-order.rst.
+       One difference is that the official state 0 is returned here
+       as a number that is <= 0. */
+    uintptr_t lock_idx = mark_loc(obj);
+    return write_locks[lock_idx] - (WL_FINALIZ_ORDER_1 - 1);
+}
+
+static void _bump_finalization_state_from_0_to_1(object_t *obj)
+{
+    uintptr_t lock_idx = mark_loc(obj);
+    assert(write_locks[lock_idx] < WL_FINALIZ_ORDER_1);
+    write_locks[lock_idx] = WL_FINALIZ_ORDER_1;
+}
+
+static struct list_s *_finalizer_tmpstack;
+static struct list_s *_finalizer_emptystack;
+static struct list_s *_finalizer_pending;
+
+static inline void _append_to_finalizer_tmpstack(object_t **pobj)
+{
+    object_t *obj = *pobj;
+    if (obj != NULL)
+        LIST_APPEND(_finalizer_tmpstack, obj);
+}
+
+static inline struct list_s *finalizer_trace(char *base, object_t *obj,
+                                             struct list_s *lst)
+{
+    struct object_s *realobj = (struct object_s *)REAL_ADDRESS(base, obj);
+    _finalizer_tmpstack = lst;
+    stmcb_trace(realobj, &_append_to_finalizer_tmpstack);
+    return _finalizer_tmpstack;
+}
+
+static void _recursively_bump_finalization_state(char *base, object_t *obj,
+                                                 int to_state)
+{
+    struct list_s *tmpstack = _finalizer_emptystack;
+    assert(list_is_empty(tmpstack));
+
+    while (1) {
+        if (_finalization_state(obj) == to_state - 1) {
+            /* bump to the next state */
+            write_locks[mark_loc(obj)]++;
+
+            /* trace */
+            tmpstack = finalizer_trace(base, obj, tmpstack);
+        }
+
+        if (list_is_empty(tmpstack))
+            break;
+
+        obj = (object_t *)list_pop_item(tmpstack);
+    }
+    _finalizer_emptystack = tmpstack;
+}
+
+static struct list_s *mark_finalize_step1(char *base, struct finalizers_s *f)
+{
+    if (f == NULL)
+        return NULL;
+
+    struct list_s *marked = list_create();
+
+    struct list_s *lst = f->objects_with_finalizers;
+    long i, count = list_count(lst);
+    lst->count = 0;
+    for (i = 0; i < count; i++) {
+        object_t *x = (object_t *)list_item(lst, i);
+
+        assert(_finalization_state(x) != 1);
+        if (_finalization_state(x) >= 2) {
+            list_set_item(lst, lst->count++, (uintptr_t)x);
+            continue;
+        }
+        LIST_APPEND(marked, x);
+
+        struct list_s *pending = _finalizer_pending;
+        LIST_APPEND(pending, x);
+        while (!list_is_empty(pending)) {
+            object_t *y = (object_t *)list_pop_item(pending);
+            int state = _finalization_state(y);
+            if (state <= 0) {
+                _bump_finalization_state_from_0_to_1(y);
+                pending = finalizer_trace(base, y, pending);
+            }
+            else if (state == 2) {
+                _recursively_bump_finalization_state(base, y, 3);
+            }
+        }
+        _finalizer_pending = pending;
+        assert(_finalization_state(x) == 1);
+        _recursively_bump_finalization_state(base, x, 2);
+    }
+    return marked;
+}
+
+static void mark_finalize_step2(char *base, struct finalizers_s *f,
+                                struct list_s *marked)
+{
+    if (f == NULL)
+        return;
+
+    struct list_s *run_finalizers = f->run_finalizers;
+
+    long i, count = list_count(marked);
+    for (i = 0; i < count; i++) {
+        object_t *x = (object_t *)list_item(marked, i);
+
+        int state = _finalization_state(x);
+        assert(state >= 2);
+        if (state == 2) {
+            if (run_finalizers == NULL)
+                run_finalizers = list_create();
+            LIST_APPEND(run_finalizers, x);
+            _recursively_bump_finalization_state(base, x, 3);
+        }
+        else {
+            struct list_s *lst = f->objects_with_finalizers;
+            list_set_item(lst, lst->count++, (uintptr_t)x);
+        }
+    }
+    list_free(marked);
+
+    f->run_finalizers = run_finalizers;
+}
+
+static void deal_with_objects_with_finalizers(void)
+{
+    /* for non-light finalizers */
+
+    /* there is one 'objects_with_finalizers' list per segment.
+       Objects that die at a major collection running in the same
+       transaction as they were created will be put in the
+       'run_finalizers' list of that segment.  Objects that survive at
+       least one commit move to the global g_objects_with_finalizers,
+       and when they die they go to g_run_finalizers.  The former kind
+       of dying object must have its finalizer called in the correct
+       thread; the latter kind can be called in any thread, through
+       any segment, because they all should see the same old content
+       anyway.  (If the content was different between segments at this
+       point, the object would be in a 'modified_old_objects' list
+       somewhere, and so it wouldn't be dead).
+    */
+    struct list_s *marked_seg[NB_SEGMENTS + 1];
+    LIST_CREATE(_finalizer_emptystack);
+    LIST_CREATE(_finalizer_pending);
+
+    long j;
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+        marked_seg[j] = mark_finalize_step1(pseg->pub.segment_base,
+                                            pseg->finalizers);
+    }
+    marked_seg[0] = mark_finalize_step1(stm_object_pages, &g_finalizers);
+
+    LIST_FREE(_finalizer_pending);
+
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+        mark_finalize_step2(pseg->pub.segment_base, pseg->finalizers,
+                            marked_seg[j]);
+    }
+    mark_finalize_step2(stm_object_pages, &g_finalizers, marked_seg[0]);
+
+    LIST_FREE(_finalizer_emptystack);
+}
+
+static void _execute_finalizers(struct finalizers_s *f)
+{
+    if (f->run_finalizers == NULL)
+        return;   /* nothing to do */
+
+ restart:
+    if (f->running_next != NULL)
+        return;   /* in a nested invocation of execute_finalizers() */
+
+    uintptr_t next = 0, total = list_count(f->run_finalizers);
+    f->running_next = &next;
+
+    while (next < total) {
+        object_t *obj = (object_t *)list_item(f->run_finalizers, next);
+        list_set_item(f->run_finalizers, next, 0);
+        next++;
+
+        stmcb_finalizer(obj);
+    }
+    if (next == (uintptr_t)-1) {
+        /* transaction committed: the whole 'f' was freed */
+        return;
+    }
+    f->running_next = NULL;
+
+    if (f->run_finalizers->count > total) {
+        memmove(f->run_finalizers->items,
+                f->run_finalizers->items + total,
+                (f->run_finalizers->count - total) * sizeof(uintptr_t));
+        goto restart;
+    }
+
+    LIST_FREE(f->run_finalizers);
+}
+
+static void _invoke_general_finalizers(stm_thread_local_t *tl)
+{
+    /* called between transactions */
+    static int lock = 0;
+
+    if (__sync_lock_test_and_set(&lock, 1) != 0) {
+        /* can't acquire the lock: someone else is likely already
+           running this function, so don't wait. */
+        return;
+    }
+
+    rewind_jmp_buf rjbuf;
+    stm_rewind_jmp_enterframe(tl, &rjbuf);
+    stm_start_transaction(tl);
+
+    _execute_finalizers(&g_finalizers);
+
+    stm_commit_transaction();
+    stm_rewind_jmp_leaveframe(tl, &rjbuf);
+
+    __sync_lock_release(&lock);
+}
diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h
new file mode 100644
--- /dev/null
+++ b/c7/stm/finalizer.h
@@ -0,0 +1,47 @@
+
+struct finalizers_s {
+    struct list_s *objects_with_finalizers;
+    uintptr_t count_non_young;
+    struct list_s *run_finalizers;
+    uintptr_t *running_next;
+};
+
+static void deal_with_young_objects_with_finalizers(void);
+static void deal_with_old_objects_with_finalizers(void);
+static void deal_with_objects_with_finalizers(void);
+
+static void setup_finalizer(void);
+static void teardown_finalizer(void);
+
+static void _commit_finalizers(void);
+static void _abort_finalizers(void);
+
+#define commit_finalizers()   do {              \
+    if (STM_PSEGMENT->finalizers != NULL)       \
+        _commit_finalizers();                   \
+} while (0)
+
+#define abort_finalizers()   do {               \
+    if (STM_PSEGMENT->finalizers != NULL)       \
+        _abort_finalizers();                    \
+} while (0)
+
+
+/* regular finalizers (objs from already-committed transactions) */
+static struct finalizers_s g_finalizers;
+
+static void _invoke_general_finalizers(stm_thread_local_t *tl);
+
+#define invoke_general_finalizers(tl)    do {   \
+    if (g_finalizers.run_finalizers != NULL)    \
+        _invoke_general_finalizers(tl);         \
+} while (0)
+
+static void _execute_finalizers(struct finalizers_s *f);
+
+#define any_local_finalizers() (STM_PSEGMENT->finalizers != NULL &&         \
+                               STM_PSEGMENT->finalizers->run_finalizers != NULL)
+#define exec_local_finalizers()  do {                   \
+    if (any_local_finalizers())                         \
+        _execute_finalizers(STM_PSEGMENT->finalizers);  \
+} while (0)
diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c
--- a/c7/stm/forksupport.c
+++ b/c7/stm/forksupport.c
@@ -55,14 +55,12 @@
     s_mutex_unlock();
 
     bool was_in_transaction = _stm_in_transaction(this_tl);
-    if (was_in_transaction) {
-        stm_become_inevitable(this_tl, "fork");
-        /* Note that the line above can still fail and abort, which should
-           be fine */
-    }
-    else {
-        stm_start_inevitable_transaction(this_tl);
-    }
+    if (!was_in_transaction)
+        stm_start_transaction(this_tl);
+
+    stm_become_inevitable(this_tl, "fork");
+    /* Note that the line above can still fail and abort, which should
+       be fine */
 
     s_mutex_lock();
     synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
@@ -187,7 +185,6 @@
 #ifndef NDEBUG
     pr->running_pthread = pthread_self();
 #endif
-    strcpy(pr->marker_self, "fork");
     tl->shadowstack = NULL;
     pr->shadowstack_at_start_of_transaction = NULL;
     stm_rewind_jmp_forget(tl);
@@ -204,6 +201,9 @@
        just release these locks early */
     s_mutex_unlock();
 
+    /* Open a new profiling file, if any */
+    forksupport_open_new_profiling_file();
+
     /* Move the copy of the mmap over the old one, overwriting it
        and thus freeing the old mapping in this process
     */
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -141,7 +141,7 @@
 
     if (is_major_collection_requested()) {   /* if still true */
 
-        int oldstate = change_timing_state(STM_TIME_MAJOR_GC);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
 
         synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
 
@@ -149,10 +149,11 @@
             major_collection_now_at_safe_point();
         }
 
-        change_timing_state(oldstate);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
     }
 
     s_mutex_unlock();
+    exec_local_finalizers();
 }
 
 
@@ -161,7 +162,11 @@
 
 static struct list_s *mark_objects_to_trace;
 
-#define WL_VISITED   255
+#define WL_FINALIZ_ORDER_1    253
+#define WL_FINALIZ_ORDER_2    254
+#define WL_FINALIZ_ORDER_3    WL_VISITED
+
+#define WL_VISITED            255
 
 
 static inline uintptr_t mark_loc(object_t *obj)
@@ -446,9 +451,9 @@
         for (i = list_count(lst); i > 0; i -= 2) {
             mark_visit_object((object_t *)list_item(lst, i - 1), base);
         }
-        if (get_priv_segment(j)->marker_inev[1]) {
-            uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1];
-            mark_visit_object((object_t *)marker_inev_obj, base);
+        if (get_priv_segment(j)->marker_inev.segment_base) {
+            object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object;
+            mark_visit_object(marker_inev_obj, base);
         }
     }
 }
@@ -626,8 +631,14 @@
     mark_visit_from_roots();
     LIST_FREE(mark_objects_to_trace);
 
-    /* weakrefs: */
+    /* finalizer support: will mark as WL_VISITED all objects with a
+       finalizer and all objects reachable from there, and also moves
+       some objects from 'objects_with_finalizers' to 'run_finalizers'. */
+    deal_with_objects_with_finalizers();
+
+    /* weakrefs and old light finalizers */
     stm_visit_old_weakrefs();
+    deal_with_old_objects_with_finalizers();
 
     /* cleanup */
     clean_up_segment_lists();
diff --git a/c7/stm/list.c b/c7/stm/list.c
--- a/c7/stm/list.c
+++ b/c7/stm/list.c
@@ -30,6 +30,21 @@
     return lst;
 }
 
+static struct list_s *list_extend(struct list_s *lst, struct list_s *lst2,
+                                  uintptr_t slicestart)
+{
+    if (lst2->count <= slicestart)
+        return lst;
+    uintptr_t baseindex = lst->count;
+    lst->count = baseindex + lst2->count - slicestart;
+    uintptr_t lastindex = lst->count - 1;
+    if (lastindex > lst->last_allocated)
+        lst = _list_grow(lst, lastindex);
+    memcpy(lst->items + baseindex, lst2->items + slicestart,
+           (lst2->count - slicestart) * sizeof(uintptr_t));
+    return lst;
+}
+
 
 /************************************************************/
 
diff --git a/c7/stm/list.h b/c7/stm/list.h
--- a/c7/stm/list.h
+++ b/c7/stm/list.h
@@ -83,6 +83,9 @@
     return &lst->items[index];
 }
 
+static struct list_s *list_extend(struct list_s *lst, struct list_s *lst2,
+                                  uintptr_t slicestart);
+
 #define LIST_FOREACH_R(lst, TYPE, CODE)         \
     do {                                        \
         struct list_s *_lst = (lst);            \
diff --git a/c7/stm/marker.c b/c7/stm/marker.c
--- a/c7/stm/marker.c
+++ b/c7/stm/marker.c
@@ -3,18 +3,11 @@
 #endif
 
 
-void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number,
-                            object_t *following_object,
-                            char *outputbuf, size_t outputbufsize);
-
-void (*stmcb_debug_print)(const char *cause, double time,
-                          const char *marker);
-
-
-static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2])
+static void marker_fetch(stm_loc_marker_t *out_marker)
 {
-    /* fetch the current marker from the tl's shadow stack,
-       and return it in 'marker[2]'. */
+    /* Fetch the current marker from the 'out_marker->tl's shadow stack,
+       and return it in 'out_marker->odd_number' and 'out_marker->object'. */
+    stm_thread_local_t *tl = out_marker->tl;
     struct stm_shadowentry_s *current = tl->shadowstack - 1;
     struct stm_shadowentry_s *base = tl->shadowstack_base;
 
@@ -28,85 +21,31 @@
     }
     if (current != base) {
         /* found the odd marker */
-        marker[0] = (uintptr_t)current[0].ss;
-        marker[1] = (uintptr_t)current[1].ss;
+        out_marker->odd_number = (uintptr_t)current[0].ss;
+        out_marker->object = current[1].ss;
     }
     else {
         /* no marker found */
-        marker[0] = 0;
-        marker[1] = 0;
+        out_marker->odd_number = 0;
+        out_marker->object = NULL;
     }
 }
 
-static void marker_expand(uintptr_t marker[2], char *segment_base,
-                          char *outmarker)
+static void _timing_fetch_inev(void)
 {
-    /* Expand the marker given by 'marker[2]' into a full string.  This
-       works assuming that the marker was produced inside the segment
-       given by 'segment_base'.  If that's from a different thread, you
-       must first acquire the corresponding 'marker_lock'. */
-    assert(_has_mutex());
-    outmarker[0] = 0;
-    if (marker[0] == 0)
-        return;   /* no marker entry found */
-    if (stmcb_expand_marker != NULL) {
-        stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1],
-                            outmarker, _STM_MARKER_LEN);
-    }
+    stm_loc_marker_t marker;
+    marker.tl = STM_SEGMENT->running_thread;
+    marker_fetch(&marker);
+    STM_PSEGMENT->marker_inev.odd_number = marker.odd_number;
+    STM_PSEGMENT->marker_inev.object = marker.object;
 }
 
-static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg)
+static void marker_fetch_obj_write(object_t *obj, stm_loc_marker_t *out_marker)
 {
-    if (pseg->marker_self[0] != 0)
-        return;   /* already collected an entry */
-
-    uintptr_t marker[2];
-    marker_fetch(pseg->pub.running_thread, marker);
-    marker_expand(marker, pseg->pub.segment_base, pseg->marker_self);
-    pseg->marker_other[0] = 0;
-}
-
-char *_stm_expand_marker(void)
-{
-    /* for tests only! */
-    static char _result[_STM_MARKER_LEN];
-    uintptr_t marker[2];
-    _result[0] = 0;
-    s_mutex_lock();
-    marker_fetch(STM_SEGMENT->running_thread, marker);
-    marker_expand(marker, STM_SEGMENT->segment_base, _result);
-    s_mutex_unlock();
-    return _result;
-}
-
-static void marker_copy(stm_thread_local_t *tl,
-                        struct stm_priv_segment_info_s *pseg,
-                        enum stm_time_e attribute_to, double time)
-{
-    /* Copies the marker information from pseg to tl.  This is called
-       indirectly from abort_with_mutex(), but only if the lost time is
-       greater than that of the previous recorded marker.  By contrast,
-       pseg->marker_self has been filled already in all cases.  The
-       reason for the two steps is that we must fill pseg->marker_self
-       earlier than now (some objects may be GCed), but we only know
-       here the total time it gets attributed.
+    /* From 'out_marker->tl', fill in 'out_marker->segment_base' and
+       'out_marker->odd_number' and 'out_marker->object' from the
+       marker associated with writing the 'obj'.
     */
-    if (stmcb_debug_print) {
-        stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self);
-    }
-    if (time * 0.99 > tl->longest_marker_time) {
-        tl->longest_marker_state = attribute_to;
-        tl->longest_marker_time = time;
-        memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN);
-        memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN);
-    }
-    pseg->marker_self[0] = 0;
-    pseg->marker_other[0] = 0;
-}
-
-static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj,
-                                   uintptr_t marker[2])
-{
     assert(_has_mutex());
 
     /* here, we acquired the other thread's marker_lock, which means that:
@@ -118,80 +57,86 @@
            the global mutex_lock at this point too).
     */
     long i;
+    int in_segment_num = out_marker->tl->associated_segment_num;
     struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num);
     struct list_s *mlst = pseg->modified_old_objects;
     struct list_s *mlstm = pseg->modified_old_objects_markers;
-    for (i = list_count(mlst); --i >= 0; ) {
+    assert(list_count(mlstm) <= 2 * list_count(mlst));
+    for (i = list_count(mlstm) / 2; --i >= 0; ) {
         if (list_item(mlst, i) == (uintptr_t)obj) {
-            assert(list_count(mlstm) == 2 * list_count(mlst));
-            marker[0] = list_item(mlstm, i * 2 + 0);
-            marker[1] = list_item(mlstm, i * 2 + 1);
+            out_marker->odd_number = list_item(mlstm, i * 2 + 0);
+            out_marker->object = (object_t *)list_item(mlstm, i * 2 + 1);
             return;
         }
     }
-    marker[0] = 0;
-    marker[1] = 0;
+    out_marker->odd_number = 0;
+    out_marker->object = NULL;
 }
 
-static void marker_contention(int kind, bool abort_other,
-                              uint8_t other_segment_num, object_t *obj)
+static void _timing_record_write(void)
 {
-    uintptr_t self_marker[2];
-    uintptr_t other_marker[2];
-    struct stm_priv_segment_info_s *my_pseg, *other_pseg;
+    stm_loc_marker_t marker;
+    marker.tl = STM_SEGMENT->running_thread;
+    marker_fetch(&marker);
 
-    my_pseg = get_priv_segment(STM_SEGMENT->segment_num);
+    long base_count = list_count(STM_PSEGMENT->modified_old_objects);
+    struct list_s *mlstm = STM_PSEGMENT->modified_old_objects_markers;
+    while (list_count(mlstm) < 2 * base_count) {
+        mlstm = list_append2(mlstm, 0, 0);
+    }
+    mlstm = list_append2(mlstm, marker.odd_number, (uintptr_t)marker.object);
+    STM_PSEGMENT->modified_old_objects_markers = mlstm;
+}
+
+static void _timing_contention(enum stm_event_e kind,
+                               uint8_t other_segment_num, object_t *obj)
+{
+    struct stm_priv_segment_info_s *other_pseg;
     other_pseg = get_priv_segment(other_segment_num);
 
-    char *my_segment_base = STM_SEGMENT->segment_base;
-    char *other_segment_base = get_segment_base(other_segment_num);
+    char *other_segment_base = other_pseg->pub.segment_base;
+    acquire_marker_lock(other_segment_base);
 
-    acquire_marker_lock(other_segment_base);
+    stm_loc_marker_t markers[2];
 
     /* Collect the location for myself.  It's usually the current
        location, except in a write-read abort, in which case it's the
        older location of the write. */
-    if (kind == WRITE_READ_CONTENTION)
-        marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker);
+    markers[0].tl = STM_SEGMENT->running_thread;
+    markers[0].segment_base = STM_SEGMENT->segment_base;
+
+    if (kind == STM_CONTENTION_WRITE_READ)
+        marker_fetch_obj_write(obj, &markers[0]);
     else
-        marker_fetch(my_pseg->pub.running_thread, self_marker);
-
-    /* Expand this location into either my_pseg->marker_self or
-       other_pseg->marker_other, depending on who aborts. */
-    marker_expand(self_marker, my_segment_base,
-                  abort_other ? other_pseg->marker_other
-                              : my_pseg->marker_self);
+        marker_fetch(&markers[0]);
 
     /* For some categories, we can also collect the relevant information
        for the other segment. */
-    char *outmarker = abort_other ? other_pseg->marker_self
-                                  : my_pseg->marker_other;
+    markers[1].tl = other_pseg->pub.running_thread;
+    markers[1].segment_base = other_pseg->pub.segment_base;
+
     switch (kind) {
-    case WRITE_WRITE_CONTENTION:
-        marker_fetch_obj_write(other_segment_num, obj, other_marker);
-        marker_expand(other_marker, other_segment_base, outmarker);
+    case STM_CONTENTION_WRITE_WRITE:
+        marker_fetch_obj_write(obj, &markers[1]);
         break;
-    case INEVITABLE_CONTENTION:
-        assert(abort_other == false);
-        other_marker[0] = other_pseg->marker_inev[0];
-        other_marker[1] = other_pseg->marker_inev[1];
-        marker_expand(other_marker, other_segment_base, outmarker);
-        break;
-    case WRITE_READ_CONTENTION:
-        strcpy(outmarker, "<read at unknown location>");
+    case STM_CONTENTION_INEVITABLE:
+        markers[1].odd_number = other_pseg->marker_inev.odd_number;
+        markers[1].object = other_pseg->marker_inev.object;
         break;
     default:
-        outmarker[0] = 0;
+        markers[1].odd_number = 0;
+        markers[1].object = NULL;
         break;
     }
 
+    stmcb_timing_event(markers[0].tl, kind, markers);
+
+    /* only release the lock after stmcb_timing_event(), otherwise it could
+       run into race conditions trying to interpret 'markers[1].object' */
     release_marker_lock(other_segment_base);
 }
 
-static void marker_fetch_inev(void)
-{
-    uintptr_t marker[2];
-    marker_fetch(STM_SEGMENT->running_thread, marker);
-    STM_PSEGMENT->marker_inev[0] = marker[0];
-    STM_PSEGMENT->marker_inev[1] = marker[1];
-}
+
+void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */
+                           enum stm_event_e event,
+                           stm_loc_marker_t *markers);
diff --git a/c7/stm/marker.h b/c7/stm/marker.h
--- a/c7/stm/marker.h
+++ b/c7/stm/marker.h
@@ -1,12 +1,19 @@
 
-static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]);
-static void marker_fetch_inev(void);
-static void marker_expand(uintptr_t marker[2], char *segment_base,
-                          char *outmarker);
-static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg);
-static void marker_copy(stm_thread_local_t *tl,
-                        struct stm_priv_segment_info_s *pseg,
-                        enum stm_time_e attribute_to, double time);
+static void _timing_record_write(void);
+static void _timing_fetch_inev(void);
+static void _timing_contention(enum stm_event_e kind,
+                               uint8_t other_segment_num, object_t *obj);
 
-static void marker_contention(int kind, bool abort_other,
-                              uint8_t other_segment_num, object_t *obj);
+
+#define timing_event(tl, event)                                         \
+    (stmcb_timing_event != NULL ? stmcb_timing_event(tl, event, NULL) : (void)0)
+
+#define timing_record_write()                                           \
+    (stmcb_timing_event != NULL ? _timing_record_write() : (void)0)
+
+#define timing_fetch_inev()                                             \
+    (stmcb_timing_event != NULL ? _timing_fetch_inev() : (void)0)
+
+#define timing_contention(kind, other_segnum, obj)                      \
+    (stmcb_timing_event != NULL ?                                       \
+        _timing_contention(kind, other_segnum, obj) : (void)0)
diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c
--- a/c7/stm/nursery.c
+++ b/c7/stm/nursery.c
@@ -425,14 +425,32 @@
     for (i = num_old + 1; i < total; i += 2) {
         minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i));
     }
-    if (STM_PSEGMENT->marker_inev[1]) {
-        uintptr_t *pmarker_inev_obj = (uintptr_t *)
+    if (STM_PSEGMENT->marker_inev.segment_base) {
+        assert(STM_PSEGMENT->marker_inev.segment_base ==
+               STM_SEGMENT->segment_base);
+        object_t **pmarker_inev_obj = (object_t **)
             REAL_ADDRESS(STM_SEGMENT->segment_base,
-                         &STM_PSEGMENT->marker_inev[1]);
-        minor_trace_if_young((object_t **)pmarker_inev_obj);
+                         &STM_PSEGMENT->marker_inev.object);
+        minor_trace_if_young(pmarker_inev_obj);
     }
 }
 
+static void collect_objs_still_young_but_with_finalizers(void)
+{
+    struct list_s *lst = STM_PSEGMENT->finalizers->objects_with_finalizers;
+    uintptr_t i, total = list_count(lst);
+
+    for (i = STM_PSEGMENT->finalizers->count_non_young; i < total; i++) {
+
+        object_t *o = (object_t *)list_item(lst, i);
+        minor_trace_if_young(&o);
+
+        /* was not actually movable */
+        assert(o == (object_t *)list_item(lst, i));
+    }
+    STM_PSEGMENT->finalizers->count_non_young = total;
+}
+
 static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg)
 {
 #pragma push_macro("STM_PSEGMENT")
@@ -552,11 +570,15 @@
 
     collect_roots_in_nursery();
 
+    if (STM_PSEGMENT->finalizers != NULL)
+        collect_objs_still_young_but_with_finalizers();
+
     collect_oldrefs_to_nursery();
     assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards));
 
     /* now all surviving nursery objects have been moved out */
     stm_move_young_weakrefs();
+    deal_with_young_objects_with_finalizers();
 
     throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num));
 
@@ -572,11 +594,11 @@
 
     stm_safe_point();
 
-    change_timing_state(STM_TIME_MINOR_GC);
+    timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START);
 
     _do_minor_collection(commit);
 
-    change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT);
+    timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_DONE);
 }
 
 void stm_collect(long level)
diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h
--- a/c7/stm/nursery.h
+++ b/c7/stm/nursery.h
@@ -1,7 +1,13 @@
 
-/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */
-#define NSE_SIGPAUSE   STM_TIME_WAIT_OTHER
-#define NSE_SIGCOMMITSOON   STM_TIME_SYNC_COMMIT_SOON
+/* 'nursery_end' is either NURSERY_END or one of NSE_SIGxxx */
+#define NSE_SIGABORT        1
+#define NSE_SIGPAUSE        2
+#define NSE_SIGCOMMITSOON   3
+#define _NSE_NUM_SIGNALS    4
+
+#if _NSE_NUM_SIGNALS >= _STM_NSE_SIGNAL_MAX
+#  error "increase _STM_NSE_SIGNAL_MAX"
+#endif
 
 
 static uint32_t highest_overflow_number;
diff --git a/c7/stm/prof.c b/c7/stm/prof.c
new file mode 100644
--- /dev/null
+++ b/c7/stm/prof.c
@@ -0,0 +1,106 @@
+#include <time.h>
+
+
+static FILE *profiling_file;
+static char *profiling_basefn = NULL;
+static int (*profiling_expand_marker)(stm_loc_marker_t *, char *, int);
+
+
+static void _stm_profiling_event(stm_thread_local_t *tl,
+                                 enum stm_event_e event,
+                                 stm_loc_marker_t *markers)
+{
+    struct buf_s {
+        uint32_t tv_sec;
+        uint32_t tv_nsec;
+        uint32_t thread_num;
+        uint32_t other_thread_num;
+        uint8_t event;
+        uint8_t marker_length[2];
+        char extra[256];
+    } __attribute__((packed));
+
+    struct buf_s buf;
+    struct timespec t;
+    clock_gettime(CLOCK_MONOTONIC, &t);
+    buf.tv_sec = t.tv_sec;
+    buf.tv_nsec = t.tv_nsec;
+    buf.thread_num = tl->thread_local_counter;
+    buf.other_thread_num = 0;
+    buf.event = event;
+
+    int len0 = 0;
+    int len1 = 0;
+    if (markers != NULL) {
+        if (markers[1].tl != NULL)
+            buf.other_thread_num = markers[1].tl->thread_local_counter;
+        if (markers[0].odd_number != 0)
+            len0 = profiling_expand_marker(&markers[0], buf.extra, 128);
+        if (markers[1].odd_number != 0)
+            len1 = profiling_expand_marker(&markers[1], buf.extra + len0, 128);
+    }
+    buf.marker_length[0] = len0;
+    buf.marker_length[1] = len1;
+
+    fwrite(&buf, offsetof(struct buf_s, extra) + len0 + len1,
+           1, profiling_file);
+}
+
+static int default_expand_marker(stm_loc_marker_t *m, char *p, int s)
+{
+    *(uintptr_t *)p = m->odd_number;
+    return sizeof(uintptr_t);
+}
+
+static bool open_timing_log(const char *filename)
+{
+    profiling_file = fopen(filename, "w");
+    if (profiling_file == NULL)
+        return false;
+
+    fwrite("STMGC-C7-PROF01\n", 16, 1, profiling_file);
+    stmcb_timing_event = _stm_profiling_event;
+    return true;
+}
+
+static bool close_timing_log(void)
+{
+    if (stmcb_timing_event == &_stm_profiling_event) {
+        stmcb_timing_event = NULL;
+        fclose(profiling_file);
+        profiling_file = NULL;
+        return true;
+    }
+    return false;
+}
+
+static void forksupport_open_new_profiling_file(void)
+{
+    if (close_timing_log() && profiling_basefn != NULL) {
+        char filename[1024];
+        snprintf(filename, sizeof(filename),
+                 "%s.fork%ld", profiling_basefn, (long)getpid());
+        open_timing_log(filename);
+    }
+}
+
+int stm_set_timing_log(const char *profiling_file_name,
+                       int expand_marker(stm_loc_marker_t *, char *, int))
+{
+    close_timing_log();
+    free(profiling_basefn);
+    profiling_basefn = NULL;
+
+    if (profiling_file_name == NULL)
+        return 0;
+
+    if (!expand_marker)
+        expand_marker = default_expand_marker;
+    profiling_expand_marker = expand_marker;
+
+    if (!open_timing_log(profiling_file_name))
+        return -1;
+
+    profiling_basefn = strdup(profiling_file_name);
+    return 0;
+}
diff --git a/c7/stm/prof.h b/c7/stm/prof.h
new file mode 100644
--- /dev/null
+++ b/c7/stm/prof.h
@@ -0,0 +1,2 @@
+
+static void forksupport_open_new_profiling_file(void);
diff --git a/c7/stm/setup.c b/c7/stm/setup.c
--- a/c7/stm/setup.c
+++ b/c7/stm/setup.c
@@ -22,8 +22,8 @@
 static char *setup_mmap(char *reason, int *map_fd)
 {
     char name[128];
-    sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e",
-            (long)getpid(), get_stm_time());
+    sprintf(name, "/stmgc-c7-bigmem-%ld",
+            (long)getpid());
 
     /* Create the big shared memory object, and immediately unlink it.
        There is a small window where if this process is killed the
@@ -113,7 +113,7 @@
 
         /* Initialize STM_PSEGMENT */
         struct stm_priv_segment_info_s *pr = get_priv_segment(i);
-        assert(1 <= i && i < 255);   /* 255 is WL_VISITED in gcpage.c */
+        assert(1 <= i && i < 253);   /* 253 is WL_FINALIZ_ORDER_1 in gcpage.c */
         pr->write_lock_num = i;
         pr->pub.segment_num = i;
         pr->pub.segment_base = segment_base;
@@ -128,6 +128,8 @@
         pr->nursery_objects_shadows = tree_create();
         pr->callbacks_on_commit_and_abort[0] = tree_create();
         pr->callbacks_on_commit_and_abort[1] = tree_create();
+        pr->young_objects_with_light_finalizers = list_create();
+        pr->old_objects_with_light_finalizers = list_create();
         pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i;
         highest_overflow_number = pr->overflow_number;
         pr->pub.transaction_read_version = 0xff;
@@ -147,6 +149,7 @@
     setup_gcpage();
     setup_pages();
     setup_forksupport();
+    setup_finalizer();
 }
 
 void stm_teardown(void)
@@ -169,12 +172,15 @@
         tree_free(pr->nursery_objects_shadows);
         tree_free(pr->callbacks_on_commit_and_abort[0]);
         tree_free(pr->callbacks_on_commit_and_abort[1]);
+        list_free(pr->young_objects_with_light_finalizers);
+        list_free(pr->old_objects_with_light_finalizers);
     }
 
     munmap(stm_object_pages, TOTAL_MEMORY);
     stm_object_pages = NULL;
     close_fd_mmap(stm_object_pages_fd);
 
+    teardown_finalizer();
     teardown_core();
     teardown_sync();
     teardown_gcpage();
@@ -225,6 +231,8 @@
     return (pthread_t *)(tl->creating_pthread);
 }
 
+static int thread_local_counters = 0;
+
 void stm_register_thread_local(stm_thread_local_t *tl)
 {
     int num;
@@ -241,14 +249,13 @@
         num = tl->prev->associated_segment_num;
     }
     tl->thread_local_obj = NULL;
-    tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION;
-    tl->_timing_cur_start = get_stm_time();
 
     /* assign numbers consecutively, but that's for tests; we could also
        assign the same number to all of them and they would get their own
        numbers automatically. */
     num = (num % NB_SEGMENTS) + 1;
     tl->associated_segment_num = num;
+    tl->thread_local_counter = ++thread_local_counters;
     *_get_cpth(tl) = pthread_self();
     _init_shadow_stack(tl);
     set_gs_register(get_segment_base(num));
diff --git a/c7/stm/sync.c b/c7/stm/sync.c
--- a/c7/stm/sync.c
+++ b/c7/stm/sync.c
@@ -123,32 +123,19 @@
 /************************************************************/
 
 
-static void wait_for_end_of_inevitable_transaction(
-                        stm_thread_local_t *tl_or_null_if_can_abort)
+static void wait_for_end_of_inevitable_transaction(void)
 {
     long i;
  restart:
     for (i = 1; i <= NB_SEGMENTS; i++) {
         struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i);
         if (other_pseg->transaction_state == TS_INEVITABLE) {
-            if (tl_or_null_if_can_abort == NULL) {
-                /* handle this case like a contention: it will either
-                   abort us (not the other thread, which is inevitable),
-                   or wait for a while.  If we go past this call, then we
-                   waited; in this case we have to re-check if no other
-                   thread is inevitable. */
-                inevitable_contention_management(i);
-            }
-            else {
-                /* wait for stm_commit_transaction() to finish this
-                   inevitable transaction */
-                signal_other_to_commit_soon(other_pseg);
-                change_timing_state_tl(tl_or_null_if_can_abort,
-                                       STM_TIME_WAIT_INEVITABLE);
-                cond_wait(C_INEVITABLE);
-                /* don't bother changing the timing state again: the caller
-                   will very soon go to STM_TIME_RUN_CURRENT */
-            }
+            /* handle this case like a contention: it will either
+               abort us (not the other thread, which is inevitable),
+               or wait for a while.  If we go past this call, then we
+               waited; in this case we have to re-check if no other
+               thread is inevitable. */
+            inevitable_contention_management(i);
             goto restart;
         }
     }
@@ -188,8 +175,9 @@
     }
     /* No segment available.  Wait until release_thread_segment()
        signals that one segment has been freed. */
-    change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT);
+    timing_event(tl, STM_WAIT_FREE_SEGMENT);
     cond_wait(C_SEGMENT_FREE);
+    timing_event(tl, STM_WAIT_DONE);
 
     /* Return false to the caller, which will call us again */
     return false;
@@ -240,6 +228,7 @@
     assert(_stm_in_transaction(tl));
     set_gs_register(get_segment_base(tl->associated_segment_num));
     assert(STM_SEGMENT->running_thread == tl);
+    exec_local_finalizers();
 }
 
 #if STM_TESTS
@@ -331,7 +320,6 @@
     if (STM_SEGMENT->nursery_end == NURSERY_END)
         return;    /* fast path: no safe point requested */
 
-    int previous_state = -1;
     assert(_seems_to_be_running_transaction());
     assert(_has_mutex());
     while (1) {
@@ -342,10 +330,6 @@
             break;    /* no safe point requested */
 
         if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) {
-            if (previous_state == -1) {
-                previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON);
-            }
-
             STM_PSEGMENT->signalled_to_commit_soon = true;
             stmcb_commit_soon();
             if (!pause_signalled) {
@@ -362,17 +346,12 @@
 #ifdef STM_TESTS
         abort_with_mutex();
 #endif
-        if (previous_state == -1) {
-            previous_state = change_timing_state(STM_TIME_SYNC_PAUSE);
-        }
+        timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE);
         cond_signal(C_AT_SAFE_POINT);
         STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED;
         cond_wait(C_REQUEST_REMOVED);
         STM_PSEGMENT->safe_point = SP_RUNNING;
-    }
-
-    if (previous_state != -1) {
-        change_timing_state(previous_state);
+        timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
     }
 }
 
diff --git a/c7/stm/sync.h b/c7/stm/sync.h
--- a/c7/stm/sync.h
+++ b/c7/stm/sync.h
@@ -28,7 +28,7 @@
 static bool acquire_thread_segment(stm_thread_local_t *tl);
 static void release_thread_segment(stm_thread_local_t *tl);
 
-static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *);
+static void wait_for_end_of_inevitable_transaction(void);
 
 enum sync_type_e {
     STOP_OTHERS_UNTIL_MUTEX_UNLOCK,
diff --git a/c7/stm/timing.c b/c7/stm/timing.c
deleted file mode 100644
--- a/c7/stm/timing.c
+++ /dev/null
@@ -1,91 +0,0 @@
-#ifndef _STM_CORE_H_
-# error "must be compiled via stmgc.c"
-#endif
-
-
-static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category,
-                              double elapsed)
-{
-    tl->timing[category] += elapsed;
-    tl->events[category] += 1;
-}
-
-#define TIMING_CHANGE(tl, newstate)                     \
-    double curtime = get_stm_time();                    \
-    double elasped = curtime - tl->_timing_cur_start;   \
-    enum stm_time_e oldstate = tl->_timing_cur_state;   \
-    add_timing(tl, oldstate, elasped);                  \
-    tl->_timing_cur_state = newstate;                   \
-    tl->_timing_cur_start = curtime
-
-static enum stm_time_e change_timing_state(enum stm_time_e newstate)
-{
-    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
-    TIMING_CHANGE(tl, newstate);
-    return oldstate;
-}
-
-static double change_timing_state_tl(stm_thread_local_t *tl,
-                                     enum stm_time_e newstate)
-{
-    TIMING_CHANGE(tl, newstate);
-    return elasped;
-}
-
-static void timing_end_transaction(enum stm_time_e attribute_to)
-{
-    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
-    TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION);
-    double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT];
-    add_timing(tl, attribute_to, time_this_transaction);
-    tl->timing[STM_TIME_RUN_CURRENT] = 0.0f;
-
-    if (attribute_to != STM_TIME_RUN_COMMITTED) {
-        struct stm_priv_segment_info_s *pseg =
-            get_priv_segment(STM_SEGMENT->segment_num);
-        marker_copy(tl, pseg, attribute_to, time_this_transaction);
-    }
-}
-
-static const char *timer_names[] = {
-    "outside transaction",
-    "run current",
-    "run committed",
-    "run aborted write write",
-    "run aborted write read",
-    "run aborted inevitable",
-    "run aborted other",
-    "wait free segment",
-    "wait write read",
-    "wait inevitable",
-    "wait other",
-    "sync commit soon",
-    "bookkeeping",
-    "minor gc",
-    "major gc",
-    "sync pause",
-};
-
-void stm_flush_timing(stm_thread_local_t *tl, int verbose)
-{
-    enum stm_time_e category = tl->_timing_cur_state;
-    uint64_t oldevents = tl->events[category];
-    TIMING_CHANGE(tl, category);
-    tl->events[category] = oldevents;
-
-    assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N);
-    if (verbose > 0) {
-        int i;
-        s_mutex_lock();
-        fprintf(stderr, "thread %p:\n", tl);
-        for (i = 0; i < _STM_TIME_N; i++) {
-            fprintf(stderr, "    %-24s %9u %8.3f s\n",
-                    timer_names[i], tl->events[i], (double)tl->timing[i]);
-        }
-        fprintf(stderr, "    %-24s %6s %11.6f s\n",
-                "longest recorded marker", "", tl->longest_marker_time);
-        fprintf(stderr, "    \"%.*s\"\n",
-                (int)_STM_MARKER_LEN, tl->longest_marker_self);
-        s_mutex_unlock();
-    }
-}
diff --git a/c7/stm/timing.h b/c7/stm/timing.h
deleted file mode 100644
--- a/c7/stm/timing.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <time.h>
-
-static inline double get_stm_time(void)
-{
-    struct timespec tp;
-    clock_gettime(CLOCK_MONOTONIC, &tp);
-    return tp.tv_sec + tp.tv_nsec * 0.000000001;
-}
-
-static enum stm_time_e change_timing_state(enum stm_time_e newstate);
-static double change_timing_state_tl(stm_thread_local_t *tl,
-                                     enum stm_time_e newstate);
-
-static void timing_end_transaction(enum stm_time_e attribute_to);
diff --git a/c7/stmgc.c b/c7/stmgc.c
--- a/c7/stmgc.c
+++ b/c7/stmgc.c
@@ -14,8 +14,9 @@
 #include "stm/extra.h"
 #include "stm/fprintcolor.h"
 #include "stm/weakref.h"
-#include "stm/timing.h"
 #include "stm/marker.h"
+#include "stm/prof.h"
+#include "stm/finalizer.h"
 
 #include "stm/misc.c"
 #include "stm/list.c"
@@ -34,6 +35,7 @@
 #include "stm/extra.c"
 #include "stm/fprintcolor.c"
 #include "stm/weakref.c"
-#include "stm/timing.c"
 #include "stm/marker.c"
+#include "stm/prof.c"
 #include "stm/rewind_setjmp.c"
+#include "stm/finalizer.c"
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -54,28 +54,6 @@
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to