Author: Remi Meier <remi.me...@gmail.com>
Branch: gc-small-uniform
Changeset: r1648:3dc8c734e257
Date: 2015-02-24 12:47 +0100
http://bitbucket.org/pypy/stmgc/changeset/3dc8c734e257/

Log:    merge default

diff too long, truncating to 2000 out of 22436 lines

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -7,3 +7,5 @@
 *.orig
 */__pycache__
 *.out.*
+*/\#*\#
+*/.\#*
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c
--- a/c7/demo/demo2.c
+++ b/c7/demo/demo2.c
@@ -3,6 +3,7 @@
 #include <assert.h>
 #include <pthread.h>
 #include <semaphore.h>
+#include <time.h>
 
 #ifdef USE_HTM
 #  include "../../htm-c7/stmgc.h"
@@ -59,12 +60,25 @@
 }
 void stmcb_commit_soon() {}
 
-static void expand_marker(char *base, uintptr_t odd_number,
-                          object_t *following_object,
-                          char *outputbuf, size_t outputbufsize)
+static void timing_event(stm_thread_local_t *tl, /* the local thread */
+                         enum stm_event_e event,
+                         stm_loc_marker_t *markers)
 {
-    assert(following_object == NULL);
-    snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number);
+    static char *event_names[] = { STM_EVENT_NAMES };
+
+    char buf[1024], *p;
+    struct timespec tp;
+    clock_gettime(CLOCK_MONOTONIC, &tp);
+
+    p = buf;
+    p += sprintf(p, "{%.9f} %p %s", tp.tv_sec + 0.000000001 * tp.tv_nsec,
+                 tl, event_names[event]);
+    if (markers != NULL) {
+        p += sprintf(p, ", markers: %lu, %lu",
+                     markers[0].odd_number, markers[1].odd_number);
+    }
+    sprintf(p, "\n");
+    fputs(buf, stderr);
 }
 
 
@@ -108,18 +122,6 @@
 
     stm_start_transaction(&stm_thread_local);
 
-    if (stm_thread_local.longest_marker_state != 0) {
-        fprintf(stderr, "[%p] marker %d for %.6f seconds:\n",
-                &stm_thread_local,
-                stm_thread_local.longest_marker_state,
-                stm_thread_local.longest_marker_time);
-        fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n",
-                stm_thread_local.longest_marker_self,
-                stm_thread_local.longest_marker_other);
-        stm_thread_local.longest_marker_state = 0;
-        stm_thread_local.longest_marker_time = 0.0;
-    }
-
     nodeptr_t prev = initial;
     stm_read((objptr_t)prev);
 
@@ -223,7 +225,6 @@
 
 void unregister_thread_local(void)
 {
-    stm_flush_timing(&stm_thread_local, 1);
     stm_unregister_thread_local(&stm_thread_local);
 }
 
@@ -295,9 +296,15 @@
 
     stm_setup();
     stm_register_thread_local(&stm_thread_local);
+
+    /* check that we can use stm_start_inevitable_transaction() without
+       any rjbuf on the stack */
+    stm_start_inevitable_transaction(&stm_thread_local);
+    stm_commit_transaction();
+
+
     stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
-    stmcb_expand_marker = expand_marker;
-
+    stmcb_timing_event = timing_event;
 
     setup_list();
 
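(An aside on the new timing_event() hook above: the "{%.9f}" prefix it prints
is just a monotonic-clock timestamp.  A minimal standalone sketch of that
computation, with the stmgc event plumbing omitted:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec tp;
        clock_gettime(CLOCK_MONOTONIC, &tp);
        /* same formula as timing_event(): seconds + 1e-9 * nanoseconds */
        printf("{%.9f} sample event\n", tp.tv_sec + 0.000000001 * tp.tv_nsec);
        return 0;
    }

Compile with -lrt on older glibc.)
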
diff --git a/c7/demo/demo_hashtable1.c b/c7/demo/demo_hashtable1.c
new file mode 100644
--- /dev/null
+++ b/c7/demo/demo_hashtable1.c
@@ -0,0 +1,217 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+
+#include "stmgc.h"
+
+#define NUMTHREADS  4
+
+
+typedef TLPREFIX struct node_s node_t;
+typedef TLPREFIX struct dict_s dict_t;
+
+
+struct node_s {
+    struct object_s header;
+    int typeid;
+    intptr_t freevalue;
+};
+
+struct dict_s {
+    struct node_s hdr;
+    stm_hashtable_t *hashtable;
+};
+
+#define TID_NODE       0x01234567
+#define TID_DICT       0x56789ABC
+#define TID_DICTENTRY  0x6789ABCD
+
+
+static sem_t done;
+__thread stm_thread_local_t stm_thread_local;
+
+// global and per-thread data
+time_t default_seed;
+dict_t *global_dict;
+
+struct thread_data {
+    unsigned int thread_seed;
+};
+__thread struct thread_data td;
+
+
+ssize_t stmcb_size_rounded_up(struct object_s *ob)
+{
+    if (((struct node_s*)ob)->typeid == TID_NODE)
+        return sizeof(struct node_s);
+    if (((struct node_s*)ob)->typeid == TID_DICT)
+        return sizeof(struct dict_s);
+    if (((struct node_s*)ob)->typeid == TID_DICTENTRY)
+        return sizeof(struct stm_hashtable_entry_s);
+    abort();
+}
+
+void stmcb_trace(struct object_s *obj, void visit(object_t **))
+{
+    struct node_s *n;
+    n = (struct node_s*)obj;
+    if (n->typeid == TID_NODE) {
+        return;
+    }
+    if (n->typeid == TID_DICT) {
+        stm_hashtable_tracefn(((struct dict_s *)n)->hashtable, visit);
+        return;
+    }
+    if (n->typeid == TID_DICTENTRY) {
+        object_t **ref = &((struct stm_hashtable_entry_s *)obj)->object;
+        visit(ref);
+        return;
+    }
+    abort();
+}
+
+void stmcb_commit_soon() {}
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+    return 0;
+}
+void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
+                       uintptr_t start, uintptr_t stop) {
+    abort();
+}
+void stmcb_get_card_base_itemsize(struct object_s *obj,
+                                  uintptr_t offset_itemsize[2]) {
+    abort();
+}
+
+int get_rand(int max)
+{
+    if (max == 0)
+        return 0;
+    return (int)(rand_r(&td.thread_seed) % (unsigned int)max);
+}
+
+
+void populate_hashtable(int keymin, int keymax)
+{
+    int i;
+    int diff = get_rand(keymax - keymin);
+    for (i = 0; i < keymax - keymin; i++) {
+        int key = keymin + i + diff;
+        if (key >= keymax)
+            key -= (keymax - keymin);
+        object_t *o = stm_allocate(sizeof(struct node_s));
+        ((node_t *)o)->typeid = TID_NODE;
+        ((node_t *)o)->freevalue = key;
+        assert(global_dict->hdr.freevalue == 42);
+        stm_hashtable_write((object_t *)global_dict, global_dict->hashtable,
+                            key, o, &stm_thread_local);
+    }
+}
+
+void setup_thread(void)
+{
+    memset(&td, 0, sizeof(struct thread_data));
+    td.thread_seed = default_seed++;
+}
+
+void *demo_random(void *arg)
+{
+    int threadnum = (uintptr_t)arg;
+    int status;
+    rewind_jmp_buf rjbuf;
+    stm_register_thread_local(&stm_thread_local);
+    stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+
+    setup_thread();
+
+    volatile int start_count = 0;
+
+    stm_start_transaction(&stm_thread_local);
+    ++start_count;
+    assert(start_count == 1);  // all the writes that follow must not conflict
+    populate_hashtable(1291 * threadnum, 1291 * (threadnum + 1));
+    stm_commit_transaction();
+
+    stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+    stm_unregister_thread_local(&stm_thread_local);
+
+    status = sem_post(&done); assert(status == 0);
+    return NULL;
+}
+
+void newthread(void*(*func)(void*), void *arg)
+{
+    pthread_t th;
+    int status = pthread_create(&th, NULL, func, arg);
+    if (status != 0)
+        abort();
+    pthread_detach(th);
+    printf("started new thread\n");
+}
+
+void setup_globals(void)
+{
+    stm_hashtable_t *my_hashtable = stm_hashtable_create();
+    struct dict_s new_templ = {
+        .hdr = {
+            .typeid = TID_DICT,
+            .freevalue = 42,
+        },
+        .hashtable = my_hashtable,
+    };
+
+    stm_start_inevitable_transaction(&stm_thread_local);
+    global_dict = (dict_t *)stm_setup_prebuilt(
+                      (object_t* )(uintptr_t)&new_templ);
+    assert(global_dict->hashtable);
+    stm_commit_transaction();
+}
+
+
+int main(void)
+{
+    int i, status;
+    rewind_jmp_buf rjbuf;
+
+    stm_hashtable_entry_userdata = TID_DICTENTRY;
+
+    /* pick a random seed from the time in seconds.
+       A bit pointless for now... because the interleaving of the
+       threads is really random. */
+    default_seed = time(NULL);
+    printf("running with seed=%lld\n", (long long)default_seed);
+
+    status = sem_init(&done, 0, 0);
+    assert(status == 0);
+
+
+    stm_setup();
+    stm_register_thread_local(&stm_thread_local);
+    stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
+
+    setup_globals();
+
+    for (i = 0; i < NUMTHREADS; i++) {
+        newthread(demo_random, (void *)(uintptr_t)i);
+    }
+
+    for (i = 0; i < NUMTHREADS; i++) {
+        status = sem_wait(&done);
+        assert(status == 0);
+        printf("thread finished\n");
+    }
+
+    printf("Test OK!\n");
+
+    stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
+    stm_unregister_thread_local(&stm_thread_local);
+    stm_teardown();
+
+    return 0;
+}
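
(A sketch, not from the patch: the key rotation in populate_hashtable() above
starts at a random offset 'diff' but still visits every key in
[keymin, keymax) exactly once, wrapping at keymax.  Self-contained
illustration with example values:

    #include <stdio.h>

    int main(void)
    {
        int keymin = 0, keymax = 8, diff = 3;   /* example values */
        int i;
        for (i = 0; i < keymax - keymin; i++) {
            int key = keymin + i + diff;
            if (key >= keymax)
                key -= (keymax - keymin);       /* wrap around */
            printf("%d ", key);                 /* prints: 3 4 5 6 7 0 1 2 */
        }
        printf("\n");
        return 0;
    }
)
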
diff --git a/c7/llvmfix/README.txt b/c7/llvmfix/README.txt
--- a/c7/llvmfix/README.txt
+++ b/c7/llvmfix/README.txt
@@ -1,3 +1,22 @@
+Apply these patches to llvm, svn revision 201645,
+which you get from:
+
+ svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm -r 201645
+ cd llvm/tools
+ svn co http://llvm.org/svn/llvm-project/cfe/trunk clang -r 201645
+ cd ../..
+ cd llvm/projects
+ svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk compiler-rt -r 201645
+ cd ../..
+ cd llvm
+ patch -p0 < ~/.../c7/llvmfix/...diff
+ # ^^^ repeat that line for all patches in this directory
+ cd ..
+ mkdir llvm-build
+ cd llvm-build
+ ../llvm/configure --enable-optimized      # requires gcc >= 4.7!
+ make
+
 
 no-introduce-bogus-cast-in-combine.diff
 
diff --git a/c7/stm/contention.c b/c7/stm/contention.c
--- a/c7/stm/contention.c
+++ b/c7/stm/contention.c
@@ -3,34 +3,50 @@
 #endif
 
 
-enum contention_kind_e {
+/* Here are the possible kinds of contention:
 
-    /* A write-write contention occurs when we running our transaction
-       and detect that we are about to write to an object that another
-       thread is also writing to.  This kind of contention must be
-       resolved before continuing.  This *must* abort one of the two
-       threads: the caller's thread is not at a safe-point, so cannot
-       wait! */
-    WRITE_WRITE_CONTENTION,
+   STM_CONTENTION_WRITE_WRITE
 
-    /* A write-read contention occurs when we are trying to commit: it
+       A write-write contention occurs when we are running our
+       transaction and detect that we are about to write to an object
+       that another thread is also writing to.  This kind of
+       contention must be resolved before continuing.  This *must*
+       abort one of the two threads: the caller's thread is not at a
+       safe-point, so cannot wait!
+
+       It is reported as a timing event with the following two markers:
+       the current thread (i.e. where the second-in-time write occurs);
+       and the other thread (from its 'modified_old_objects_markers',
+       where the first-in-time write occurred).
+
+   STM_CONTENTION_WRITE_READ
+
+       A write-read contention occurs when we are trying to commit: it
        means that an object we wrote to was also read by another
        transaction.  Even though it would seem obvious that we should
        just abort the other thread and proceed in our commit, a more
        subtle answer would be in some cases to wait for the other thread
        to commit first.  It would commit having read the old value, and
-       then we can commit our change to it. */
-    WRITE_READ_CONTENTION,
+       then we can commit our change to it.
 
-    /* An inevitable contention occurs when we're trying to become
+       It is reported as a timing event with only one marker: the
+       older location of the write that was done by the current thread.
+
+   STM_CONTENTION_INEVITABLE
+
+       An inevitable contention occurs when we're trying to become
        inevitable but another thread already is.  We can never abort the
        other thread in this case, but we still have the choice to abort
-       ourselves or pause until the other thread commits. */
-    INEVITABLE_CONTENTION,
-};
+       ourselves or pause until the other thread commits.
+
+       It is reported with two markers, one for the current thread and
+       one for the other thread.  Each marker gives the location that
+       attempts to make the transaction inevitable.
+*/
+
 
 struct contmgr_s {
-    enum contention_kind_e kind;
+    enum stm_event_e kind;
     struct stm_priv_segment_info_s *other_pseg;
     bool abort_other;
     bool try_sleep;  // XXX add a way to timeout, but should handle repeated
@@ -99,7 +115,7 @@
 
 
 static bool contention_management(uint8_t other_segment_num,
-                                  enum contention_kind_e kind,
+                                  enum stm_event_e kind,
                                   object_t *obj)
 {
     assert(_has_mutex());
@@ -109,6 +125,9 @@
     if (must_abort())
         abort_with_mutex();
 
+    /* Report the contention */
+    timing_contention(kind, other_segment_num, obj);
+
     /* Who should abort here: this thread, or the other thread? */
     struct contmgr_s contmgr;
     contmgr.kind = kind;
@@ -138,20 +157,9 @@
         contmgr.abort_other = false;
     }
 
-
-    int wait_category =
-        kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ :
-        kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE :
-        STM_TIME_WAIT_OTHER;
-
-    int abort_category =
-        kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE :
-        kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ :
-        kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE :
-        STM_TIME_RUN_ABORTED_OTHER;
-
-
-    if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION &&
+    /* Do one of three things here...
+     */
+    if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE &&
         contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) {
         others_may_have_run = true;
         /* Sleep.
@@ -164,30 +172,24 @@
              itself already paused here.
         */
         contmgr.other_pseg->signal_when_done = true;
-        marker_contention(kind, false, other_segment_num, obj);
-
-        change_timing_state(wait_category);
 
         /* tell the other to commit ASAP */
         signal_other_to_commit_soon(contmgr.other_pseg);
 
         dprintf(("pausing...\n"));
+
+        timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION);
+
         cond_signal(C_AT_SAFE_POINT);
         STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE;
         cond_wait(C_TRANSACTION_DONE);
         STM_PSEGMENT->safe_point = SP_RUNNING;
         dprintf(("pausing done\n"));
 
+        timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
+
         if (must_abort())
             abort_with_mutex();
-
-        struct stm_priv_segment_info_s *pseg =
-            get_priv_segment(STM_SEGMENT->segment_num);
-        double elapsed =
-            change_timing_state_tl(pseg->pub.running_thread,
-                                   STM_TIME_RUN_CURRENT);
-        marker_copy(pseg->pub.running_thread, pseg,
-                    wait_category, elapsed);
     }
 
     else if (!contmgr.abort_other) {
@@ -195,16 +197,16 @@
         signal_other_to_commit_soon(contmgr.other_pseg);
 
         dprintf(("abort in contention: kind %d\n", kind));
-        STM_SEGMENT->nursery_end = abort_category;
-        marker_contention(kind, false, other_segment_num, obj);
         abort_with_mutex();
     }
 
     else {
         /* We have to signal the other thread to abort, and wait until
            it does. */
-        contmgr.other_pseg->pub.nursery_end = abort_category;
-        marker_contention(kind, true, other_segment_num, obj);
+        contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT;
+
+        timing_event(STM_SEGMENT->running_thread,
+                     STM_ABORTING_OTHER_CONTENTION);
 
         int sp = contmgr.other_pseg->safe_point;
         switch (sp) {
@@ -296,7 +298,8 @@
         assert(get_priv_segment(other_segment_num)->write_lock_num ==
                prev_owner);
 
-        contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj);
+        contention_management(other_segment_num,
+                              STM_CONTENTION_WRITE_WRITE, obj);
 
         /* now we return into _stm_write_slowpath() and will try again
            to acquire the write lock on our object. */
@@ -308,10 +311,12 @@
 static bool write_read_contention_management(uint8_t other_segment_num,
                                              object_t *obj)
 {
-    return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj);
+    return contention_management(other_segment_num,
+                                 STM_CONTENTION_WRITE_READ, obj);
 }
 
 static void inevitable_contention_management(uint8_t other_segment_num)
 {
-    contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL);
+    contention_management(other_segment_num,
+                          STM_CONTENTION_INEVITABLE, NULL);
 }
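
(Illustration only: a hypothetical consumer of the three contention kinds
documented at the top of this file.  The STM_CONTENTION_* names follow the
comment, but the enum values and the describe() helper are invented here:

    #include <stdio.h>

    enum stm_event_e {                    /* subset, for illustration */
        STM_CONTENTION_WRITE_WRITE,
        STM_CONTENTION_WRITE_READ,
        STM_CONTENTION_INEVITABLE,
    };

    static const char *describe(enum stm_event_e kind)
    {
        switch (kind) {
        case STM_CONTENTION_WRITE_WRITE:
            return "two threads write the same object: abort one now";
        case STM_CONTENTION_WRITE_READ:
            return "a write was read by another transaction: abort it or wait";
        case STM_CONTENTION_INEVITABLE:
            return "another transaction is already inevitable: pause or abort";
        }
        return "?";
    }

    int main(void)
    {
        printf("%s\n", describe(STM_CONTENTION_WRITE_READ));
        return 0;
    }
)
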
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -124,17 +124,13 @@
 
         dprintf_test(("write_slowpath %p -> mod_old\n", obj));
 
-        /* First change to this old object from this transaction.
+        /* Add the current marker, recording where we wrote to this object */
+        timing_record_write();
+
+        /* Change to this old object from this transaction.
            Add it to the list 'modified_old_objects'. */
         LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj);
 
-        /* Add the current marker, recording where we wrote to this object */
-        uintptr_t marker[2];
-        marker_fetch(STM_SEGMENT->running_thread, marker);
-        STM_PSEGMENT->modified_old_objects_markers =
-            list_append2(STM_PSEGMENT->modified_old_objects_markers,
-                         marker[0], marker[1]);
-
         release_marker_lock(STM_SEGMENT->segment_base);
 
         /* We need to privatize the pages containing the object, if they
@@ -313,44 +309,41 @@
     /* force-reset all read markers to 0 */
 
     char *readmarkers = REAL_ADDRESS(STM_SEGMENT->segment_base,
-                                     FIRST_READMARKER_PAGE * 4096UL);
+                                     FIRST_OLD_RM_PAGE * 4096UL);
+    uintptr_t num_bytes = 4096UL *
+        (NB_READMARKER_PAGES - (FIRST_OLD_RM_PAGE - FIRST_READMARKER_PAGE));
+
     dprintf(("reset_transaction_read_version: %p %ld\n", readmarkers,
-             (long)(NB_READMARKER_PAGES * 4096UL)));
-    if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL,
+             (long)num_bytes));
+
+    if (mmap(readmarkers, num_bytes,
              PROT_READ | PROT_WRITE,
-             MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) {
-        /* fall-back */
-#if STM_TESTS
+             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+             -1, 0) != readmarkers) {
+        /* failed */
         stm_fatalerror("reset_transaction_read_version: %m");
-#endif
-        memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL);
     }
     STM_SEGMENT->transaction_read_version = 1;
 }
 
-static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable)
+static uint64_t _global_start_time = 0;
+
+static void _stm_start_transaction(stm_thread_local_t *tl)
 {
     assert(!_stm_in_transaction(tl));
 
-  retry:
-    if (inevitable) {
-        wait_for_end_of_inevitable_transaction(tl);
-    }
-
-    if (!acquire_thread_segment(tl))
-        goto retry;
+    while (!acquire_thread_segment(tl))
+        ;
     /* GS invalid before this point! */
 
     assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION);
     assert(STM_PSEGMENT->transaction_state == TS_NONE);
-    change_timing_state(STM_TIME_RUN_CURRENT);
-    STM_PSEGMENT->start_time = tl->_timing_cur_start;
+    timing_event(tl, STM_TRANSACTION_START);
+    STM_PSEGMENT->start_time = _global_start_time++;
     STM_PSEGMENT->signalled_to_commit_soon = false;
     STM_PSEGMENT->safe_point = SP_RUNNING;
-    STM_PSEGMENT->marker_inev[1] = 0;
-    if (inevitable)
-        marker_fetch_inev();
-    STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR);
+    STM_PSEGMENT->marker_inev.object = NULL;
+    STM_PSEGMENT->transaction_state = TS_REGULAR;
 #ifndef NDEBUG
     STM_PSEGMENT->running_pthread = pthread_self();
 #endif
@@ -376,13 +369,16 @@
 
     assert(list_is_empty(STM_PSEGMENT->modified_old_objects));
     assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers));
+    assert(list_is_empty(STM_PSEGMENT->modified_old_hashtables));
     assert(list_is_empty(STM_PSEGMENT->young_weakrefs));
     assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery));
     assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows));
     assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0]));
     assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
+    assert(list_is_empty(STM_PSEGMENT->young_objects_with_light_finalizers));
     assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL);
     assert(STM_PSEGMENT->large_overflow_objects == NULL);
+    assert(STM_PSEGMENT->finalizers == NULL);
 #ifndef NDEBUG
     /* this should not be used when objects_pointing_to_nursery == NULL */
     STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L;
@@ -399,14 +395,21 @@
 #else
     long repeat_count = stm_rewind_jmp_setjmp(tl);
 #endif
-    _stm_start_transaction(tl, false);
+    _stm_start_transaction(tl);
     return repeat_count;
 }
 
 void stm_start_inevitable_transaction(stm_thread_local_t *tl)
 {
-    s_mutex_lock();
-    _stm_start_transaction(tl, true);
+    /* this used to start an inevitable transaction directly, which was
+       more efficient, but there is no real point any more, I believe */
+    rewind_jmp_buf rjbuf;
+    stm_rewind_jmp_enterframe(tl, &rjbuf);
+
+    stm_start_transaction(tl);
+    stm_become_inevitable(tl, "start_inevitable_transaction");
+
+    stm_rewind_jmp_leaveframe(tl, &rjbuf);
 }
 
 
@@ -431,27 +434,50 @@
             continue;    /* no need to check: is pending immediate abort */
 
         char *remote_base = get_segment_base(i);
-        uint8_t remote_version = get_segment(i)->transaction_read_version;
+        object_t *conflicting_obj;
+        uintptr_t j, limit;
+        struct list_s *lst;
 
-        LIST_FOREACH_R(
-            STM_PSEGMENT->modified_old_objects,
-            object_t * /*item*/,
-            ({
-                if (was_read_remote(remote_base, item, remote_version)) {
-                    /* A write-read conflict! */
-                    dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n",
-                             item, STM_SEGMENT->segment_num, i));
-                    if (write_read_contention_management(i, item)) {
-                        /* If we reach this point, we didn't abort, but we
-                           had to wait for the other thread to commit.  If we
-                           did, then we have to restart committing from our call
-                           to synchronize_all_threads(). */
-                        return true;
-                    }
-                    /* we aborted the other transaction without waiting, so
-                       we can just continue */
-                }
-            }));
+        /* Look in forward order: this is an attempt to report the _first_
+           write that conflicts with another segment's reads
+        */
+        lst = STM_PSEGMENT->modified_old_objects;
+        limit = list_count(lst);
+        for (j = 0; j < limit; j++) {
+            object_t *obj = (object_t *)list_item(lst, j);
+            if (was_read_remote(remote_base, obj)) {
+                conflicting_obj = obj;
+                goto found_conflict;
+            }
+        }
+
+        lst = STM_PSEGMENT->modified_old_hashtables;
+        limit = list_count(lst);
+        for (j = 0; j < limit; j += 2) {
+            object_t *hobj = (object_t *)list_item(lst, j);
+            if (was_read_remote(remote_base, hobj)) {
+                conflicting_obj = (object_t *)list_item(lst, j + 1);
+                goto found_conflict;
+            }
+        }
+
+        continue;
+
+     found_conflict:
+        /* A write-read conflict! */
+        dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n",
+                 conflicting_obj, STM_SEGMENT->segment_num, i));
+        if (write_read_contention_management(i, conflicting_obj)) {
+            /* If we reach this point, we didn't abort, but we
+               had to wait for the other thread to commit.  If we
+               did, then we have to restart committing from our call
+               to synchronize_all_threads(). */
+            return true;
+        }
+        /* we aborted the other transaction without waiting, so we can
+           just ignore the rest of this (now aborted) segment.  Let's
+           move on to the next one. */
+        continue;
     }
 
     return false;
@@ -588,7 +614,6 @@
     uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj);
     uintptr_t card_index = 1;
     uintptr_t last_card_index = get_index_to_card_index(real_idx_count - 1); /* max valid index */
-    long myself = STM_SEGMENT->segment_num;
 
     /* simple heuristic to check if probably the whole object is
        marked anyway so we should do page-wise synchronize */
@@ -662,7 +687,7 @@
             /* dprintf(("copy %lu bytes\n", copy_size)); */
 
             /* since we have marked cards, at least one page here must be private */
-            assert(_has_private_page_in_range(myself, start, copy_size));
+            assert(_has_private_page_in_range(STM_SEGMENT->segment_num, start, copy_size));
 
             /* push to seg0 and enqueue for synchronization */
             _synchronize_fragment((stm_char *)start, copy_size);
@@ -806,15 +831,16 @@
     synchronize_objects_flush();
     list_clear(STM_PSEGMENT->modified_old_objects);
     list_clear(STM_PSEGMENT->modified_old_objects_markers);
+    list_clear(STM_PSEGMENT->modified_old_hashtables);
 }
 
-static void _finish_transaction(int attribute_to)
+static void _finish_transaction(enum stm_event_e event)
 {
     STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
     STM_PSEGMENT->transaction_state = TS_NONE;
 
     /* marker_inev is not needed anymore */
-    STM_PSEGMENT->marker_inev[1] = 0;
+    STM_PSEGMENT->marker_inev.object = NULL;
 
     /* reset these lists to NULL for the next transaction */
     _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
@@ -822,24 +848,24 @@
     list_clear(STM_PSEGMENT->old_objects_with_cards);
     LIST_FREE(STM_PSEGMENT->large_overflow_objects);
 
-    timing_end_transaction(attribute_to);
+    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+    timing_event(tl, event);
 
-    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
     release_thread_segment(tl);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 }
 
 void stm_commit_transaction(void)
 {
+ restart_all:
+    exec_local_finalizers();
+
     assert(!_has_mutex());
     assert(STM_PSEGMENT->safe_point == SP_RUNNING);
     assert(STM_PSEGMENT->running_pthread == pthread_self());
 
     minor_collection(/*commit=*/ true);
 
-    /* the call to minor_collection() above leaves us with
-       STM_TIME_BOOKKEEPING */
-
     /* synchronize overflow objects living in privatized pages */
     push_overflow_objects_from_privatized_pages();
 
@@ -851,6 +877,11 @@
        Important: we should not call cond_wait() in the meantime. */
     synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
 
+    if (any_local_finalizers()) {
+        s_mutex_unlock();
+        goto restart_all;
+    }
+
     /* detect conflicts */
     if (detect_write_read_conflicts())
         goto restart;
@@ -863,15 +894,17 @@
 
     /* if a major collection is required, do it here */
     if (is_major_collection_requested()) {
-        int oldstate = change_timing_state(STM_TIME_MAJOR_GC);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
         major_collection_now_at_safe_point();
-        change_timing_state(oldstate);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
     }
 
     /* synchronize modified old objects to other threads */
     push_modified_to_other_segments();
     _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
 
+    commit_finalizers();
+
     /* update 'overflow_number' if needed */
     if (STM_PSEGMENT->overflow_number_has_been_used) {
         highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0;
@@ -892,10 +925,13 @@
     }
 
     /* done */
-    _finish_transaction(STM_TIME_RUN_COMMITTED);
+    stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+    _finish_transaction(STM_TRANSACTION_COMMIT);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 
     s_mutex_unlock();
+
+    invoke_general_finalizers(tl);
 }
 
 void stm_abort_transaction(void)
@@ -957,6 +993,7 @@
 
     list_clear(pseg->modified_old_objects);
     list_clear(pseg->modified_old_objects_markers);
+    list_clear(pseg->modified_old_hashtables);
 }
 
 static void abort_data_structures_from_segment_num(int segment_num)
@@ -985,9 +1022,7 @@
                        (int)pseg->transaction_state);
     }
 
-    /* if we don't have marker information already, look up and preserve
-       the marker information from the shadowstack as a string */
-    marker_default_for_abort(pseg);
+    abort_finalizers(pseg);
 
     /* throw away the content of the nursery */
     long bytes_in_nursery = throw_away_nursery(pseg);
@@ -1077,16 +1112,13 @@
     /* invoke the callbacks */
     invoke_and_clear_user_callbacks(1);   /* for abort */
 
-    int attribute_to = STM_TIME_RUN_ABORTED_OTHER;
-
     if (is_abort(STM_SEGMENT->nursery_end)) {
         /* done aborting */
-        attribute_to = STM_SEGMENT->nursery_end;
         STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE
                                                    : NURSERY_END;
     }
 
-    _finish_transaction(attribute_to);
+    _finish_transaction(STM_TRANSACTION_ABORT);
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 
     /* Broadcast C_ABORTED to wake up contention.c */
@@ -1128,8 +1160,10 @@
     if (STM_PSEGMENT->transaction_state == TS_REGULAR) {
         dprintf(("become_inevitable: %s\n", msg));
 
-        marker_fetch_inev();
-        wait_for_end_of_inevitable_transaction(NULL);
+        timing_fetch_inev();
+        write_fence();    /* make sure others see a correct 'marker_inev'
+                             if they see TS_INEVITABLE */
+        wait_for_end_of_inevitable_transaction();
         STM_PSEGMENT->transaction_state = TS_INEVITABLE;
         stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
         invoke_and_clear_user_callbacks(0);   /* for commit */
@@ -1150,3 +1184,23 @@
     synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE);
     s_mutex_unlock();
 }
+
+void stm_stop_all_other_threads(void)
+{
+    if (!stm_is_inevitable())         /* may still abort */
+        _stm_become_inevitable("stop_all_other_threads");
+
+    s_mutex_lock();
+    synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE);
+    s_mutex_unlock();
+}
+
+void stm_resume_all_other_threads(void)
+{
+    /* this calls 'committed_globally_unique_transaction()' even though
+       we're not committing now.  It's a way to piggyback on the existing
+       implementation for stm_become_globally_unique_transaction(). */
+    s_mutex_lock();
+    committed_globally_unique_transaction();
+    s_mutex_unlock();
+}
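
(A toy model, not from the patch: the rewritten detect_write_read_conflicts()
above scans the modified objects in forward order, so that the first-in-time
conflicting write is the one reported.  Here a plain array stands in for the
'modified_old_objects' list and for was_read_remote():

    #include <stdbool.h>
    #include <stdio.h>

    static bool read_remote[5] = { false, false, true, false, true };

    int main(void)
    {
        int j;
        for (j = 0; j < 5; j++) {
            if (read_remote[j]) {       /* first conflicting write wins */
                printf("write-read conflict on object #%d\n", j);
                return 0;
            }
        }
        printf("no conflict\n");
        return 0;
    }
)
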
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -43,13 +43,18 @@
     */
     GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER,
 
+    /* This flag is set by gcpage.c for all objects living in
+       uniformly-sized pages of small objects.
+    */
+    GCFLAG_SMALL_UNIFORM = 0x02,
+
     /* The following flag is set on nursery objects of which we asked
        the id or the identityhash.  It means that a space of the size of
        the object has already been allocated in the nonmovable part.
        The same flag is abused to mark prebuilt objects whose hash has
        been taken during translation and is statically recorded just
        after the object. */
-    GCFLAG_HAS_SHADOW = 0x02,
+    GCFLAG_HAS_SHADOW = 0x04,
 
     /* Set on objects that are large enough (_STM_MIN_CARD_OBJ_SIZE)
        to have multiple cards (at least _STM_MIN_CARD_COUNT), and that
@@ -64,7 +69,7 @@
        current transaction that have been flushed out of the nursery,
        which occurs if the same transaction allocates too many objects.
     */
-    GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8   /* must be last */
+    GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10   /* must be last */
 };
 
 #define SYNC_QUEUE_SIZE    31
@@ -92,6 +97,15 @@
     struct list_s *modified_old_objects_markers;
     uintptr_t modified_old_objects_markers_num_old;
 
+    /* This list contains all old hashtables that have entries that we
+       modified.  It's actually a list of pairs hashtable/sample_entry.
+       Note that several transactions can all commit if
+       they have the same hashtable listed here.  The point of this
+       list is only that if another segment does a global "read" of
+       the hashtable (stm_hashtable_list), then it conflicts with this
+       segment if it has made any change to the hashtable. */
+    struct list_s *modified_old_hashtables;
+
     /* List of out-of-nursery objects that may contain pointers to
        nursery objects.  This is used to track the GC status: they are
        all objects outside the nursery on which an stm_write() occurred
@@ -135,7 +149,7 @@
 
     /* Start time: to know approximately for how long a transaction has
        been running, in contention management */
-    double start_time;
+    uint64_t start_time;
 
     /* This is the number stored in the overflowed objects (a multiple of
        GCFLAG_OVERFLOW_NUMBER_bit0).  It is incremented when the
@@ -202,10 +216,15 @@
     int sq_fragsizes[SYNC_QUEUE_SIZE];
     int sq_len;
 
-    /* Temporarily stores the marker information */
-    char marker_self[_STM_MARKER_LEN];
-    char marker_other[_STM_MARKER_LEN];
-    uintptr_t marker_inev[2];  /* marker where this thread became inevitable */
+    /* marker where this thread became inevitable */
+    stm_loc_marker_t marker_inev;
+
+    /* light finalizers */
+    struct list_s *young_objects_with_light_finalizers;
+    struct list_s *old_objects_with_light_finalizers;
+
+    /* regular finalizers (objs from the current transaction only) */
+    struct finalizers_s *finalizers;
 };
 
 enum /* safe_point */ {
@@ -285,9 +304,11 @@
 static stm_thread_local_t *abort_with_mutex_no_longjmp(void);
 static void abort_data_structures_from_segment_num(int segment_num);
 
-static inline bool was_read_remote(char *base, object_t *obj,
-                                   uint8_t other_transaction_read_version)
+static inline bool was_read_remote(char *base, object_t *obj)
 {
+    uint8_t other_transaction_read_version =
+        ((struct stm_segment_info_s *)REAL_ADDRESS(base, STM_PSEGMENT))
+            ->transaction_read_version;
     uint8_t rm = ((struct stm_read_marker_s *)
                   (base + (((uintptr_t)obj) >> 4)))->rm;
     assert(rm <= other_transaction_read_version);
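
(A sketch of the flag renumbering above: inserting GCFLAG_SMALL_UNIFORM at
0x02 shifts GCFLAG_HAS_SHADOW to 0x04 and GCFLAG_OVERFLOW_NUMBER_bit0 to
0x10.  A self-contained check that the bits stay disjoint powers of two;
the 0x01 and 0x08 values are assumptions about _STM_GCFLAG_WRITE_BARRIER
and the card flag, not taken from this diff:

    #include <assert.h>
    #include <stdio.h>

    enum {
        GCFLAG_WRITE_BARRIER        = 0x01,   /* assumed */
        GCFLAG_SMALL_UNIFORM        = 0x02,
        GCFLAG_HAS_SHADOW           = 0x04,
        GCFLAG_CARDS_SET            = 0x08,   /* assumed */
        GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10,   /* must stay the highest */
    };

    int main(void)
    {
        int flags[] = { GCFLAG_WRITE_BARRIER, GCFLAG_SMALL_UNIFORM,
                        GCFLAG_HAS_SHADOW, GCFLAG_CARDS_SET,
                        GCFLAG_OVERFLOW_NUMBER_bit0 };
        int i, all = 0;
        for (i = 0; i < 5; i++) {
            assert((flags[i] & (flags[i] - 1)) == 0);  /* power of two */
            assert((all & flags[i]) == 0);             /* disjoint */
            all |= flags[i];
        }
        printf("flag bits OK up to 0x%x\n", all);
        return 0;
    }
)
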
diff --git a/c7/stm/extra.c b/c7/stm/extra.c
--- a/c7/stm/extra.c
+++ b/c7/stm/extra.c
@@ -6,15 +6,24 @@
 static long register_callbacks(stm_thread_local_t *tl,
                                void *key, void callback(void *), long index)
 {
-    if (!_stm_in_transaction(tl)) {
-        /* check that the current thread-local is really running a
+    dprintf(("register_callbacks: tl=%p key=%p callback=%p index=%ld\n",
+             tl, key, callback, index));
+    if (tl->associated_segment_num == -1) {
+        /* check that the provided thread-local is really running a
            transaction, and do nothing otherwise. */
+        dprintf(("  NOT IN TRANSACTION\n"));
         return -1;
     }
-
-    if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
+    /* The tl was only here to check that.  We're really using
+       STM_PSEGMENT below, which is often but not always the
+       segment corresponding to the tl.  One case where they differ
+       is when this gets called from stmcb_light_finalizer(), via
+       abort_finalizers(), during major collections or contention.
+    */
+    if (STM_PSEGMENT->transaction_state != TS_REGULAR) {
         /* ignore callbacks if we're in an inevitable transaction
-           (which cannot abort) */
+           (which cannot abort) or no transaction at all in this segment */
+        dprintf(("  STATE = %d\n", (int)STM_PSEGMENT->transaction_state));
         return -1;
     }
 
@@ -23,10 +32,13 @@
 
     if (callback == NULL) {
         /* double-unregistering works, but return 0 */
-        return tree_delete_item(callbacks, (uintptr_t)key);
+        long res = tree_delete_item(callbacks, (uintptr_t)key);
+        dprintf(("  DELETED %ld\n", res));
+        return res;
     }
     else {
         /* double-registering the same key will crash */
+        dprintf(("  INSERTING\n"));
         tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback);
         return 1;
     }
@@ -39,6 +51,7 @@
     if (result < 0 && callback != NULL) {
         /* no regular transaction running, invoke the callback
            immediately */
+        dprintf(("stm_call_on_commit calls now: %p(%p)\n", callback, key));
         callback(key);
     }
     return result;
@@ -72,8 +85,11 @@
         assert(key != NULL);
         assert(callback != NULL);
 
-        /* The callback may call stm_call_on_abort(key, NULL).  It is ignored,
-           because 'callbacks_on_commit_and_abort' was cleared already. */
+        /* The callback may call stm_call_on_abort(key, NULL)
+           (so with callback==NULL).  It is ignored, because
+           'callbacks_on_commit_and_abort' was cleared already. */
+        dprintf(("invoke_and_clear_user_callbacks(%ld): %p(%p)\n",
+                 index, callback, key));
         callback(key);
 
     } TREE_LOOP_END;
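
(A toy model of the semantics of register_callbacks() above, not stmgc's
actual tree: passing callback==NULL deletes the key and returns how many
entries were removed, so double-unregistering harmlessly returns 0.  The
register_cb() and hello() names are invented for this flat-array sketch:

    #include <stdio.h>

    #define MAXCB 16
    static void *keys[MAXCB];
    static void (*cbs[MAXCB])(void *);

    static long register_cb(void *key, void cb(void *))
    {
        long i;
        if (cb == NULL) {                         /* unregister */
            for (i = 0; i < MAXCB; i++)
                if (keys[i] == key) { keys[i] = NULL; return 1; }
            return 0;                             /* double-unregister: 0 */
        }
        for (i = 0; i < MAXCB; i++)
            if (keys[i] == NULL) { keys[i] = key; cbs[i] = cb; return 1; }
        return -1;
    }

    static void hello(void *key) { printf("callback for %p\n", key); }

    int main(void)
    {
        int k;
        register_cb(&k, hello);
        printf("unregister: %ld\n", register_cb(&k, NULL));        /* 1 */
        printf("unregister again: %ld\n", register_cb(&k, NULL));  /* 0 */
        return 0;
    }
)
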
diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c
new file mode 100644
--- /dev/null
+++ b/c7/stm/finalizer.c
@@ -0,0 +1,473 @@
+
+
+/* callbacks */
+void (*stmcb_light_finalizer)(object_t *);
+void (*stmcb_finalizer)(object_t *);
+
+
+static void init_finalizers(struct finalizers_s *f)
+{
+    f->objects_with_finalizers = list_create();
+    f->count_non_young = 0;
+    f->run_finalizers = NULL;
+    f->running_next = NULL;
+}
+
+static void setup_finalizer(void)
+{
+    init_finalizers(&g_finalizers);
+}
+
+static void teardown_finalizer(void)
+{
+    if (g_finalizers.run_finalizers != NULL)
+        list_free(g_finalizers.run_finalizers);
+    list_free(g_finalizers.objects_with_finalizers);
+    memset(&g_finalizers, 0, sizeof(g_finalizers));
+}
+
+static void _commit_finalizers(void)
+{
+    if (STM_PSEGMENT->finalizers->run_finalizers != NULL) {
+        /* copy 'STM_PSEGMENT->finalizers->run_finalizers' into
+           'g_finalizers.run_finalizers', dropping any initial NULLs
+           (finalizers already called) */
+        struct list_s *src = STM_PSEGMENT->finalizers->run_finalizers;
+        uintptr_t frm = 0;
+        if (STM_PSEGMENT->finalizers->running_next != NULL) {
+            frm = *STM_PSEGMENT->finalizers->running_next;
+            assert(frm <= list_count(src));
+            *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1;
+        }
+        if (frm < list_count(src)) {
+            if (g_finalizers.run_finalizers == NULL)
+                g_finalizers.run_finalizers = list_create();
+            g_finalizers.run_finalizers = list_extend(
+                g_finalizers.run_finalizers,
+                src, frm);
+        }
+        list_free(src);
+    }
+
+    /* copy the whole 'STM_PSEGMENT->finalizers->objects_with_finalizers'
+       into 'g_finalizers.objects_with_finalizers' */
+    g_finalizers.objects_with_finalizers = list_extend(
+        g_finalizers.objects_with_finalizers,
+        STM_PSEGMENT->finalizers->objects_with_finalizers, 0);
+    list_free(STM_PSEGMENT->finalizers->objects_with_finalizers);
+
+    free(STM_PSEGMENT->finalizers);
+    STM_PSEGMENT->finalizers = NULL;
+}
+
+static void abort_finalizers(struct stm_priv_segment_info_s *pseg)
+{
+    /* like _commit_finalizers(), but forget everything from the
+       current transaction */
+    if (pseg->finalizers != NULL) {
+        if (pseg->finalizers->run_finalizers != NULL) {
+            if (pseg->finalizers->running_next != NULL) {
+                *pseg->finalizers->running_next = (uintptr_t)-1;
+            }
+            list_free(pseg->finalizers->run_finalizers);
+        }
+        list_free(pseg->finalizers->objects_with_finalizers);
+        free(pseg->finalizers);
+        pseg->finalizers = NULL;
+    }
+
+    /* call the light finalizers for objects that are about to
+       be forgotten from the current transaction */
+    char *old_gs_register = STM_SEGMENT->segment_base;
+    bool must_fix_gs = old_gs_register != pseg->pub.segment_base;
+
+    struct list_s *lst = pseg->young_objects_with_light_finalizers;
+    long i, count = list_count(lst);
+    if (count > 0) {
+        for (i = 0; i < count; i++) {
+            object_t *obj = (object_t *)list_item(lst, i);
+            assert(_is_young(obj));
+            if (must_fix_gs) {
+                set_gs_register(pseg->pub.segment_base);
+                must_fix_gs = false;
+            }
+            stmcb_light_finalizer(obj);
+        }
+        list_clear(lst);
+    }
+
+    /* also deals with overflow objects: they are at the tail of
+       old_objects_with_light_finalizers (this list is kept in order
+       and we cannot add any already-committed object) */
+    lst = pseg->old_objects_with_light_finalizers;
+    count = list_count(lst);
+    while (count > 0) {
+        object_t *obj = (object_t *)list_item(lst, --count);
+        if (!IS_OVERFLOW_OBJ(pseg, obj))
+            break;
+        lst->count = count;
+        if (must_fix_gs) {
+            set_gs_register(pseg->pub.segment_base);
+            must_fix_gs = false;
+        }
+        stmcb_light_finalizer(obj);
+    }
+
+    if (STM_SEGMENT->segment_base != old_gs_register)
+        set_gs_register(old_gs_register);
+}
+
+
+void stm_enable_light_finalizer(object_t *obj)
+{
+    if (_is_young(obj)) {
+        LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj);
+    }
+    else {
+        assert(_is_from_same_transaction(obj));
+        LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj);
+    }
+}
+
+object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up)
+{
+    object_t *obj = _stm_allocate_external(size_rounded_up);
+
+    if (STM_PSEGMENT->finalizers == NULL) {
+        struct finalizers_s *f = malloc(sizeof(struct finalizers_s));
+        if (f == NULL)
+            stm_fatalerror("out of memory in create_finalizers");   /* XXX */
+        init_finalizers(f);
+        STM_PSEGMENT->finalizers = f;
+    }
+    assert(STM_PSEGMENT->finalizers->count_non_young
+           <= list_count(STM_PSEGMENT->finalizers->objects_with_finalizers));
+    LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj);
+    return obj;
+}
+
+
+/************************************************************/
+/*  Light finalizers
+*/
+
+static void deal_with_young_objects_with_finalizers(void)
+{
+    /* for light finalizers */
+    struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers;
+    long i, count = list_count(lst);
+    for (i = 0; i < count; i++) {
+        object_t *obj = (object_t *)list_item(lst, i);
+        assert(_is_young(obj));
+
+        object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj;
+        if (pforwarded_array[0] != GCWORD_MOVED) {
+            /* not moved: the object dies */
+            stmcb_light_finalizer(obj);
+        }
+        else {
+            obj = pforwarded_array[1]; /* moved location */
+            assert(!_is_young(obj));
+            LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj);
+        }
+    }
+    list_clear(lst);
+}
+
+static void deal_with_old_objects_with_finalizers(void)
+{
+    /* for light finalizers */
+    int old_gs_register = STM_SEGMENT->segment_num;
+    int current_gs_register = old_gs_register;
+    long j;
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+
+        struct list_s *lst = pseg->old_objects_with_light_finalizers;
+        long i, count = list_count(lst);
+        lst->count = 0;
+        for (i = 0; i < count; i++) {
+            object_t *obj = (object_t *)list_item(lst, i);
+            if (!mark_visited_test(obj)) {
+                /* not marked: object dies */
+                /* we're calling the light finalizer in the same
+                   segment as where it was originally registered.  For
+                   objects that existed since a long time, it doesn't
+                   change anything: any thread should see the same old
+                   content (because if it wasn't the case, the object
+                   would be in a 'modified_old_objects' list
+                   somewhere, and so it wouldn't be dead).  But it's
+                   important if the object was created by the same
+                   transaction: then only that segment sees valid
+                   content.
+                */
+                if (j != current_gs_register) {
+                    set_gs_register(get_segment_base(j));
+                    current_gs_register = j;
+                }
+                stmcb_light_finalizer(obj);
+            }
+            else {
+                /* object survives */
+                list_set_item(lst, lst->count++, (uintptr_t)obj);
+            }
+        }
+    }
+    if (old_gs_register != current_gs_register)
+        set_gs_register(get_segment_base(old_gs_register));
+}
+
+
+/************************************************************/
+/*  Algorithm for regular (non-light) finalizers.
+    Follows closely pypy/doc/discussion/finalizer-order.rst
+    as well as rpython/memory/gc/minimark.py.
+*/
+
+static inline int _finalization_state(object_t *obj)
+{
+    /* Returns the state, "0", 1, 2 or 3, as per finalizer-order.rst.
+       One difference is that the official state 0 is returned here
+       as a number that is <= 0. */
+    uintptr_t lock_idx = mark_loc(obj);
+    return write_locks[lock_idx] - (WL_FINALIZ_ORDER_1 - 1);
+}
+
+static void _bump_finalization_state_from_0_to_1(object_t *obj)
+{
+    uintptr_t lock_idx = mark_loc(obj);
+    assert(write_locks[lock_idx] < WL_FINALIZ_ORDER_1);
+    write_locks[lock_idx] = WL_FINALIZ_ORDER_1;
+}
+
+static struct list_s *_finalizer_tmpstack;
+static struct list_s *_finalizer_emptystack;
+static struct list_s *_finalizer_pending;
+
+static inline void _append_to_finalizer_tmpstack(object_t **pobj)
+{
+    object_t *obj = *pobj;
+    if (obj != NULL)
+        LIST_APPEND(_finalizer_tmpstack, obj);
+}
+
+static inline struct list_s *finalizer_trace(char *base, object_t *obj,
+                                             struct list_s *lst)
+{
+    struct object_s *realobj = (struct object_s *)REAL_ADDRESS(base, obj);
+    _finalizer_tmpstack = lst;
+    stmcb_trace(realobj, &_append_to_finalizer_tmpstack);
+    return _finalizer_tmpstack;
+}
+
+static void _recursively_bump_finalization_state(char *base, object_t *obj,
+                                                 int to_state)
+{
+    struct list_s *tmpstack = _finalizer_emptystack;
+    assert(list_is_empty(tmpstack));
+
+    while (1) {
+        if (_finalization_state(obj) == to_state - 1) {
+            /* bump to the next state */
+            write_locks[mark_loc(obj)]++;
+
+            /* trace */
+            tmpstack = finalizer_trace(base, obj, tmpstack);
+        }
+
+        if (list_is_empty(tmpstack))
+            break;
+
+        obj = (object_t *)list_pop_item(tmpstack);
+    }
+    _finalizer_emptystack = tmpstack;
+}
+
+static struct list_s *mark_finalize_step1(char *base, struct finalizers_s *f)
+{
+    if (f == NULL)
+        return NULL;
+
+    struct list_s *marked = list_create();
+
+    struct list_s *lst = f->objects_with_finalizers;
+    long i, count = list_count(lst);
+    lst->count = 0;
+    f->count_non_young = 0;
+
+    for (i = 0; i < count; i++) {
+        object_t *x = (object_t *)list_item(lst, i);
+
+        assert(_finalization_state(x) != 1);
+        if (_finalization_state(x) >= 2) {
+            list_set_item(lst, lst->count++, (uintptr_t)x);
+            continue;
+        }
+        LIST_APPEND(marked, x);
+
+        struct list_s *pending = _finalizer_pending;
+        LIST_APPEND(pending, x);
+        while (!list_is_empty(pending)) {
+            object_t *y = (object_t *)list_pop_item(pending);
+            int state = _finalization_state(y);
+            if (state <= 0) {
+                _bump_finalization_state_from_0_to_1(y);
+                pending = finalizer_trace(base, y, pending);
+            }
+            else if (state == 2) {
+                _recursively_bump_finalization_state(base, y, 3);
+            }
+        }
+        _finalizer_pending = pending;
+        assert(_finalization_state(x) == 1);
+        _recursively_bump_finalization_state(base, x, 2);
+    }
+    return marked;
+}
+
+static void mark_finalize_step2(char *base, struct finalizers_s *f,
+                                struct list_s *marked)
+{
+    if (f == NULL)
+        return;
+
+    struct list_s *run_finalizers = f->run_finalizers;
+
+    long i, count = list_count(marked);
+    for (i = 0; i < count; i++) {
+        object_t *x = (object_t *)list_item(marked, i);
+
+        int state = _finalization_state(x);
+        assert(state >= 2);
+        if (state == 2) {
+            if (run_finalizers == NULL)
+                run_finalizers = list_create();
+            LIST_APPEND(run_finalizers, x);
+            _recursively_bump_finalization_state(base, x, 3);
+        }
+        else {
+            struct list_s *lst = f->objects_with_finalizers;
+            list_set_item(lst, lst->count++, (uintptr_t)x);
+        }
+    }
+    list_free(marked);
+
+    f->run_finalizers = run_finalizers;
+}
+
+static void deal_with_objects_with_finalizers(void)
+{
+    /* for non-light finalizers */
+
+    /* there is one 'objects_with_finalizers' list per segment.
+       Objects that die at a major collection running in the same
+       transaction as they were created will be put in the
+       'run_finalizers' list of that segment.  Objects that survive at
+       least one commit move to the global g_objects_with_finalizers,
+       and when they die they go to g_run_finalizers.  The former kind
+       of dying object must have its finalizer called in the correct
+       thread; the latter kind can be called in any thread, through
+       any segment, because they all should see the same old content
+       anyway.  (If the content was different between segments at this
+       point, the object would be in a 'modified_old_objects' list
+       somewhere, and so it wouldn't be dead).
+    */
+    struct list_s *marked_seg[NB_SEGMENTS + 1];
+    LIST_CREATE(_finalizer_emptystack);
+    LIST_CREATE(_finalizer_pending);
+
+    long j;
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+        marked_seg[j] = mark_finalize_step1(pseg->pub.segment_base,
+                                            pseg->finalizers);
+    }
+    marked_seg[0] = mark_finalize_step1(stm_object_pages, &g_finalizers);
+
+    LIST_FREE(_finalizer_pending);
+
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+        mark_finalize_step2(pseg->pub.segment_base, pseg->finalizers,
+                            marked_seg[j]);
+    }
+    mark_finalize_step2(stm_object_pages, &g_finalizers, marked_seg[0]);
+
+    LIST_FREE(_finalizer_emptystack);
+}
+
+static void mark_visit_from_finalizer1(char *base, struct finalizers_s *f)
+{
+    if (f != NULL && f->run_finalizers != NULL) {
+        LIST_FOREACH_R(f->run_finalizers, object_t * /*item*/,
+                       mark_visit_object(item, base));
+    }
+}
+
+static void mark_visit_from_finalizer_pending(void)
+{
+    long j;
+    for (j = 1; j <= NB_SEGMENTS; j++) {
+        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+        mark_visit_from_finalizer1(pseg->pub.segment_base, pseg->finalizers);
+    }
+    mark_visit_from_finalizer1(stm_object_pages, &g_finalizers);
+}
+
+static void _execute_finalizers(struct finalizers_s *f)
+{
+    if (f->run_finalizers == NULL)
+        return;   /* nothing to do */
+
+ restart:
+    if (f->running_next != NULL)
+        return;   /* in a nested invocation of execute_finalizers() */
+
+    uintptr_t next = 0, total = list_count(f->run_finalizers);
+    f->running_next = &next;
+
+    while (next < total) {
+        object_t *obj = (object_t *)list_item(f->run_finalizers, next);
+        list_set_item(f->run_finalizers, next, 0);
+        next++;
+
+        stmcb_finalizer(obj);
+    }
+    if (next == (uintptr_t)-1) {
+        /* transaction committed: the whole 'f' was freed */
+        return;
+    }
+    f->running_next = NULL;
+
+    if (f->run_finalizers->count > total) {
+        memmove(f->run_finalizers->items,
+                f->run_finalizers->items + total,
+                (f->run_finalizers->count - total) * sizeof(uintptr_t));
+        goto restart;
+    }
+
+    LIST_FREE(f->run_finalizers);
+}
+
+static void _invoke_general_finalizers(stm_thread_local_t *tl)
+{
+    /* called between transactions */
+    static int lock = 0;
+
+    if (__sync_lock_test_and_set(&lock, 1) != 0) {
+        /* can't acquire the lock: someone else is likely already
+           running this function, so don't wait. */
+        return;
+    }
+
+    rewind_jmp_buf rjbuf;
+    stm_rewind_jmp_enterframe(tl, &rjbuf);
+    stm_start_transaction(tl);
+
+    _execute_finalizers(&g_finalizers);
+
+    stm_commit_transaction();
+    stm_rewind_jmp_leaveframe(tl, &rjbuf);
+
+    __sync_lock_release(&lock);
+}
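
(On the locking in _invoke_general_finalizers() above:
__sync_lock_test_and_set() is used as a non-blocking try-lock.  It returns
the previous value, so a non-zero result means another thread is already
running the finalizers and we skip instead of waiting.  Minimal standalone
form of the same pattern:

    #include <stdio.h>

    static int lock = 0;

    static void run_once_at_a_time(void)
    {
        if (__sync_lock_test_and_set(&lock, 1) != 0)
            return;                       /* contended: skip, don't wait */
        puts("running finalizers");       /* the critical section */
        __sync_lock_release(&lock);
    }

    int main(void)
    {
        run_once_at_a_time();
        return 0;
    }
)
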
diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h
new file mode 100644
--- /dev/null
+++ b/c7/stm/finalizer.h
@@ -0,0 +1,43 @@
+
+struct finalizers_s {
+    struct list_s *objects_with_finalizers;
+    uintptr_t count_non_young;
+    struct list_s *run_finalizers;
+    uintptr_t *running_next;
+};
+
+static void mark_visit_from_finalizer_pending(void);
+static void deal_with_young_objects_with_finalizers(void);
+static void deal_with_old_objects_with_finalizers(void);
+static void deal_with_objects_with_finalizers(void);
+
+static void setup_finalizer(void);
+static void teardown_finalizer(void);
+
+static void _commit_finalizers(void);
+static void abort_finalizers(struct stm_priv_segment_info_s *);
+
+#define commit_finalizers()   do {              \
+    if (STM_PSEGMENT->finalizers != NULL)       \
+        _commit_finalizers();                   \
+} while (0)
+
+
+/* regular finalizers (objs from already-committed transactions) */
+static struct finalizers_s g_finalizers;
+
+static void _invoke_general_finalizers(stm_thread_local_t *tl);
+
+#define invoke_general_finalizers(tl)    do {   \
+    if (g_finalizers.run_finalizers != NULL)    \
+        _invoke_general_finalizers(tl);         \
+} while (0)
+
+static void _execute_finalizers(struct finalizers_s *f);
+
+#define any_local_finalizers() (STM_PSEGMENT->finalizers != NULL &&         \
+                               STM_PSEGMENT->finalizers->run_finalizers != NULL)
+#define exec_local_finalizers()  do {                   \
+    if (any_local_finalizers())                         \
+        _execute_finalizers(STM_PSEGMENT->finalizers);  \
+} while (0)
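
(An aside on the macros above: commit_finalizers() and friends wrap their
bodies in do { ... } while (0) so that each macro expands to a single
statement and composes safely with if/else.  Minimal illustration:

    #include <stdio.h>

    #define TWO_STEPS()  do { puts("step 1"); puts("step 2"); } while (0)

    int main(void)
    {
        if (1)
            TWO_STEPS();      /* one statement: the trailing ';' is fine */
        else
            puts("never");
        return 0;
    }
)
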
diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c
--- a/c7/stm/forksupport.c
+++ b/c7/stm/forksupport.c
@@ -12,16 +12,6 @@
 static stm_thread_local_t *fork_this_tl;
 static bool fork_was_in_transaction;
 
-static bool page_is_null(char *p)
-{
-    long *q = (long *)p;
-    long i;
-    for (i = 0; i < 4096 / sizeof(long); i++)
-        if (q[i] != 0)
-            return false;
-    return true;
-}
-
 
 static void forksupport_prepare(void)
 {
@@ -55,14 +45,12 @@
     s_mutex_unlock();
 
     bool was_in_transaction = _stm_in_transaction(this_tl);
-    if (was_in_transaction) {
-        stm_become_inevitable(this_tl, "fork");
-        /* Note that the line above can still fail and abort, which should
-           be fine */
-    }
-    else {
-        stm_start_inevitable_transaction(this_tl);
-    }
+    if (!was_in_transaction)
+        stm_start_transaction(this_tl);
+
+    stm_become_inevitable(this_tl, "fork");
+    /* Note that the line above can still fail and abort, which should
+       be fine */
 
     s_mutex_lock();
     synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
@@ -73,27 +61,6 @@
     int big_copy_fd;
     char *big_copy = setup_mmap("stmgc's fork support", &big_copy_fd);
 
-    /* Copy each of the segment infos into the new mmap, nurseries,
-       and associated read markers
-     */
-    long i;
-    for (i = 1; i <= NB_SEGMENTS; i++) {
-        char *src, *dst;
-        struct stm_priv_segment_info_s *psrc = get_priv_segment(i);
-        dst = big_copy + (((char *)psrc) - stm_object_pages);
-        *(struct stm_priv_segment_info_s *)dst = *psrc;
-
-        src = get_segment_base(i) + FIRST_READMARKER_PAGE * 4096UL;
-        dst = big_copy + (src - stm_object_pages);
-        long j;
-        for (j = 0; j < END_NURSERY_PAGE - FIRST_READMARKER_PAGE; j++) {
-            if (!page_is_null(src))
-                pagecopy(dst, src);
-            src += 4096;
-            dst += 4096;
-        }
-    }
-
     /* Copy all the data from the two ranges of objects (large, small)
        into the new mmap
     */
@@ -187,7 +154,6 @@
 #ifndef NDEBUG
     pr->running_pthread = pthread_self();
 #endif
-    strcpy(pr->marker_self, "fork");
     tl->shadowstack = NULL;
     pr->shadowstack_at_start_of_transaction = NULL;
     stm_rewind_jmp_forget(tl);
@@ -204,16 +170,24 @@
        just release these locks early */
     s_mutex_unlock();
 
-    /* Move the copy of the mmap over the old one, overwriting it
-       and thus freeing the old mapping in this process
+    /* Move the copy of the mmap over the old one, overwriting it,
+       with "holes" for each segment's read markers (which are already
+       MAP_PRIVATE and shouldn't be overwritten).  Then free the copy.
     */
     assert(fork_big_copy != NULL);
     assert(stm_object_pages != NULL);
-    void *res = mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY,
-                       MREMAP_MAYMOVE | MREMAP_FIXED,
-                       stm_object_pages);
-    if (res != stm_object_pages)
-        stm_fatalerror("after fork: mremap failed: %m");
+
+    long j;
+    for (j = 0; j <= NB_SEGMENTS; j++) {
+        char *dst = get_segment_base(j) + END_NURSERY_PAGE * 4096UL;
+        char *src = fork_big_copy + (dst - stm_object_pages);
+        uintptr_t num_bytes = (NB_PAGES - END_NURSERY_PAGE) * 4096UL;
+        void *res = mremap(src, num_bytes, num_bytes,
+                           MREMAP_MAYMOVE | MREMAP_FIXED, dst);
+        if (res != dst)
+            stm_fatalerror("after fork: mremap failed: %m");
+    }
+    munmap(fork_big_copy, TOTAL_MEMORY);
     fork_big_copy = NULL;
     close_fd_mmap(stm_object_pages_fd);
     stm_object_pages_fd = fork_big_copy_fd;
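
The new child-side code above relies on mremap()'s MREMAP_MAYMOVE | MREMAP_FIXED mode (Linux-specific) to move pages from the big copy onto fixed destination addresses, leaving the MAP_PRIVATE read-marker pages as untouched holes. A standalone sketch of that mode, with error handling reduced to asserts:

    #define _GNU_SOURCE
    #include <assert.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t sz = 16 * 4096;
        char *src = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *dst = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(src != MAP_FAILED && dst != MAP_FAILED);
        memset(src, 42, sz);

        /* replace the mapping at 'dst' with the pages of 'src';
           'src' itself is unmapped by the kernel afterwards */
        void *res = mremap(src, sz, sz,
                           MREMAP_MAYMOVE | MREMAP_FIXED, dst);
        assert(res == dst && dst[0] == 42);

        munmap(dst, sz);
        return 0;
    }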
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -84,6 +84,34 @@
     return o;
 }
 
+object_t *stm_allocate_preexisting(ssize_t size_rounded_up,
+                                   const char *initial_data)
+{
+    acquire_privatization_lock();
+
+    char *p = allocate_outside_nursery_large(size_rounded_up);
+    uintptr_t nobj = p - stm_object_pages;
+    dprintf(("allocate_preexisting: %p\n", (object_t *)nobj));
+    long j;
+    for (j = 0; j <= NB_SEGMENTS; j++) {
+        char *dest = get_segment_base(j) + nobj;
+        memcpy(dest, initial_data, size_rounded_up);
+        ((struct object_s *)dest)->stm_flags = GCFLAG_WRITE_BARRIER;
+#ifdef STM_TESTS
+        /* can't really enable this check outside tests, because there is
+           a chance that the transaction_state changes in parallel */
+        if (j && get_priv_segment(j)->transaction_state != TS_NONE) {
+            assert(!was_read_remote(get_segment_base(j), (object_t *)nobj));
+        }
+#endif
+    }
+
+    release_privatization_lock();
+
+    write_fence();     /* make sure 'nobj' is fully initialized as seen
+                          from all threads before we return it */
+    return (object_t *)nobj;
+}
 
 /************************************************************/
 
@@ -98,7 +126,7 @@
 
     if (is_major_collection_requested()) {   /* if still true */
 
-        int oldstate = change_timing_state(STM_TIME_MAJOR_GC);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START);
 
         synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
 
@@ -106,10 +134,11 @@
             major_collection_now_at_safe_point();
         }
 
-        change_timing_state(oldstate);
+        timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE);
     }
 
     s_mutex_unlock();
+    exec_local_finalizers();
 }
 
 
@@ -118,7 +147,11 @@
 
 static struct list_s *mark_objects_to_trace;
 
-#define WL_VISITED   255
+#define WL_FINALIZ_ORDER_1    253
+#define WL_FINALIZ_ORDER_2    254
+#define WL_FINALIZ_ORDER_3    WL_VISITED
+
+#define WL_VISITED            255
 
 
 static inline uintptr_t mark_loc(object_t *obj)
@@ -296,6 +329,8 @@
     LIST_APPEND(mark_objects_to_trace, obj);
 }
 
+#define TRACE_FOR_MAJOR_COLLECTION  (&mark_record_trace)
+
 static void mark_trace(object_t *obj, char *segment_base)
 {
     assert(list_is_empty(mark_objects_to_trace));
@@ -304,7 +339,7 @@
         /* trace into the object (the version from 'segment_base') */
         struct object_s *realobj =
             (struct object_s *)REAL_ADDRESS(segment_base, obj);
-        stmcb_trace(realobj, &mark_record_trace);
+        stmcb_trace(realobj, TRACE_FOR_MAJOR_COLLECTION);
 
         if (list_is_empty(mark_objects_to_trace))
             break;
@@ -340,12 +375,12 @@
 
     stm_thread_local_t *tl = stm_all_thread_locals;
     do {
-        /* If 'tl' is currently running, its 'associated_segment_num'
+        /* If 'tl' is currently running, its 'last_associated_segment_num'
            field is the segment number that contains the correct
            version of its overflowed objects.  If not, then the
            field is still some correct segment number, and it doesn't
            matter which one we pick. */
-        char *segment_base = get_segment_base(tl->associated_segment_num);
+        char *segment_base = get_segment_base(tl->last_associated_segment_num);
 
         struct stm_shadowentry_s *current = tl->shadowstack;
         struct stm_shadowentry_s *base = tl->shadowstack_base;
@@ -375,18 +410,33 @@
 {
     /* The modified objects are the ones that may exist in two different
        versions: one in the segment that modified it, and another in all
-       other segments.  (It can also be more than two if we don't have
-       eager write locking.)
+       other segments.  (It could also be more than two if we didn't have
+       eager write locking, but for now we do.)
     */
     long i;
     for (i = 1; i <= NB_SEGMENTS; i++) {
+        LIST_FOREACH_R(
+            get_priv_segment(i)->modified_old_objects,
+            object_t * /*item*/,
+            ({
+                /* This function is called first, and there should not be
+                   any duplicate in modified_old_objects. */
+                if (mark_visited_test_and_set(item)) {
+                    assert(!"duplicate in modified_old_objects!");
+                }
+            }));
+    }
+
+    /* Now that we have marked all modified_old_objects, trace them
+       (which will mark more objects).
+    */
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         char *base = get_segment_base(i);
 
         LIST_FOREACH_R(
             get_priv_segment(i)->modified_old_objects,
             object_t * /*item*/,
             ({
-                mark_visited_test_and_set(item);
                 mark_trace(item, stm_object_pages);  /* shared version */
                 mark_trace(item, base);              /* private version */
             }));
@@ -403,9 +453,9 @@
         for (i = list_count(lst); i > 0; i -= 2) {
             mark_visit_object((object_t *)list_item(lst, i - 1), base);
         }
-        if (get_priv_segment(j)->marker_inev[1]) {
-            uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1];
-            mark_visit_object((object_t *)marker_inev_obj, base);
+        if (get_priv_segment(j)->marker_inev.segment_base) {
+            object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object;
+            mark_visit_object(marker_inev_obj, base);
         }
     }
 }
@@ -481,10 +531,28 @@
             uintptr_t n = list_count(lst);
             while (n > 0) {
                 object_t *obj = (object_t *)list_item(lst, --n);
-                if (!mark_visited_test(obj)) {
+                if (!mark_visited_test(obj))
                     list_set_item(lst, n, list_pop_item(lst));
+            }
+        }
+
+        /* Remove from 'modified_old_hashtables' all old hashtables that
+           die, but keep the order of the surviving entries */
+        {
+            lst = pseg->modified_old_hashtables;
+            uintptr_t j, k = 0, limit = list_count(lst);
+            for (j = 0; j < limit; j += 2) {
+                object_t *hobj = (object_t *)list_item(lst, j);
+                if (mark_visited_test(hobj)) {
+                    /* hobj does not die */
+                    if (j != k) {
+                        list_set_item(lst, k, (uintptr_t)hobj);
+                        list_set_item(lst, k + 1, list_item(lst, j + 1));
+                    }
+                    k += 2;
                 }
             }
+            lst->count = k;
         }
     }
 #pragma pop_macro("STM_SEGMENT")
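
The loop above is a stable in-place compaction over a list of pairs. The same idiom in isolation, with 'keep' standing in for mark_visited_test():

    #include <stddef.h>
    #include <stdint.h>

    static size_t compact_pairs(uintptr_t *items, size_t count,
                                int (*keep)(uintptr_t key))
    {
        size_t j, k = 0;
        for (j = 0; j < count; j += 2) {
            if (keep(items[j])) {
                if (j != k) {           /* slide the pair down */
                    items[k] = items[j];
                    items[k + 1] = items[j + 1];
                }
                k += 2;
            }
        }
        return k;   /* the new count; always even */
    }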
@@ -581,10 +649,17 @@
     mark_visit_from_modified_objects();
     mark_visit_from_markers();
     mark_visit_from_roots();
+    mark_visit_from_finalizer_pending();
     LIST_FREE(mark_objects_to_trace);
 
-    /* weakrefs: */
+    /* finalizer support: marks as WL_VISITED all objects with a
+       finalizer and all objects reachable from there, and also moves
+       some objects from 'objects_with_finalizers' to 'run_finalizers'. */
+    deal_with_objects_with_finalizers();
+
+    /* weakrefs and old light finalizers */
     stm_visit_old_weakrefs();
+    deal_with_old_objects_with_finalizers();
 
     /* cleanup */
     clean_up_segment_lists();
diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c
new file mode 100644
--- /dev/null
+++ b/c7/stm/hashtable.c
@@ -0,0 +1,514 @@
+/*
+Design of stmgc's "hashtable" objects
+=====================================
+
+A "hashtable" is theoretically a lazily-filled array of objects of
+length 2**64.  Initially it is full of NULLs.  In practice it is
+implemented as a dictionary that stores only the non-NULL entries.
+
+A real dictionary can be implemented on top of it, by using the index
+`hash(key)` in the hashtable, and storing a list of `(key, value)`
+pairs at that index (usually only one, unless there is a hash
+collision).
+
+The main operations on a hashtable are reading or writing an object at a
+given index.  It also supports fetching the list of non-NULL entries.
+
+There are two markers for every index (a read and a write marker).
+This is unlike regular arrays, which have only two markers in total.
+
+Additionally, we use the read marker for the hashtable object itself
+to mean "we have read the complete list of keys".  This plays the role
+of a "global" read marker: when any thread adds a new key/value object
+to the hashtable, this new object's read marker is initialized with a
+copy of the "global" read marker --- in all segments.
+
+
+Implementation
+--------------
+
+First idea: have the hashtable in raw memory, pointing to "entry"
+objects (which are regular, GC- and STM-managed objects).  The entry
+objects themselves point to the user-specified objects.  The entry
+objects hold the read/write markers.  Every entry object, once
+created, stays around.  It is only removed by the next major GC if it
+points to NULL and its read/write markers are not set in any
+currently-running transaction.
+
+References
+----------
+
+Inspired by: http://ppl.stanford.edu/papers/podc011-bronson.pdf
+*/
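
To make the "entry objects" idea concrete: each non-NULL slot holds a small GC-managed object that owns the slot's read/write markers and points to the user's object. Its likely shape, inferred from the names visible in this file (stm_hashtable_entry_t, stm_hashtable_entry_userdata); the exact fields are an assumption:

    struct stm_hashtable_entry_s {
        struct object_s header;   /* regular GC header; the STM
                                     read/write markers attach here */
        uint32_t userdata;        /* user-defined type id, see
                                     stm_hashtable_entry_userdata */
        uintptr_t index;          /* the 64-bit index of this slot */
        object_t *object;         /* the user object, or NULL */
    };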
+
+
+uint32_t stm_hashtable_entry_userdata;
+
+
+#define INITIAL_HASHTABLE_SIZE   8
+#define PERTURB_SHIFT            5
+#define RESIZING_LOCK            0
+
+typedef struct {
+    uintptr_t mask;
+
+    /* 'resize_counter' starts at an odd value, and is decremented (by
+       6) for every new item put in 'items'.  When it crosses 0, we
+       instead allocate a bigger table and change 'resize_counter' to
+       be a regular pointer to it (which is then even).  The whole
+       structure becomes immutable at that point.
+
+       The field 'resize_counter' also works as a write lock: changes
+       go via the intermediate value RESIZING_LOCK (0).
+    */
+    uintptr_t resize_counter;
+
+    stm_hashtable_entry_t *items[INITIAL_HASHTABLE_SIZE];
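
The excerpt is truncated here, so two hedged sketches of what the fields above imply. First, decoding the three states of 'resize_counter'; second, the probing sequence suggested by 'mask' and PERTURB_SHIFT, presumably in the style of CPython's dict. Both are assumptions about code past the truncation point, and all names besides the #defines are illustrative:

    #include <stdint.h>

    #define RESIZING_LOCK   0
    #define PERTURB_SHIFT   5

    typedef struct table_s {
        uintptr_t mask;
        uintptr_t resize_counter;
        struct entry_s { uintptr_t index; } *items[8];
    } table_t;

    /* 0 = write-locked (retry), 1 = live, 2 = frozen (follow *newer) */
    static int table_state(table_t *t, table_t **newer)
    {
        uintptr_t rc = t->resize_counter;
        if (rc == RESIZING_LOCK)
            return 0;
        if (rc & 1)
            return 1;               /* about rc / 6 insertions left */
        *newer = (table_t *)rc;     /* even: pointer to bigger table */
        return 2;
    }

    /* presumed probe loop; assumes at least one free slot exists */
    static uintptr_t probe_slot(table_t *t, uintptr_t index)
    {
        uintptr_t mask = t->mask, i = index & mask, perturb = index;
        while (t->items[i] != NULL && t->items[i]->index != index) {
            i = (i * 5 + perturb + 1) & mask;
            perturb >>= PERTURB_SHIFT;
        }
        return i;   /* matching entry, or the free slot to fill */
    }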