Author: Maciej Fijalkowski <[email protected]>
Branch:
Changeset: r367:158be99cc7db
Date: 2013-07-08 10:07 +0200
http://bitbucket.org/pypy/stmgc/changeset/158be99cc7db/
Log: merge
diff --git a/c4/Makefile b/c4/Makefile
--- a/c4/Makefile
+++ b/c4/Makefile
@@ -16,10 +16,10 @@
H_FILES = atomic_ops.h stmgc.h stmimpl.h \
et.h lists.h steal.h nursery.h gcpage.h \
- stmsync.h dbgmem.h fprintcolor.h
+ stmsync.h extra.h dbgmem.h fprintcolor.h
C_FILES = et.c lists.c steal.c nursery.c gcpage.c \
- stmsync.c dbgmem.c fprintcolor.c
+ stmsync.c extra.c dbgmem.c fprintcolor.c
 DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1
diff --git a/c4/et.c b/c4/et.c
--- a/c4/et.c
+++ b/c4/et.c
@@ -248,6 +248,36 @@
}
}
+gcptr stm_RepeatReadBarrier(gcptr P)
+{
+ /* Version of stm_DirectReadBarrier() that doesn't abort and assumes
+ * that 'P' was already an up-to-date result of a previous
+ * stm_DirectReadBarrier(). We only have to check whether a
+ * stm_write_barrier() happened in the meantime.
+ */
+ if (P->h_tid & GCFLAG_PUBLIC)
+ {
+ if (P->h_tid & GCFLAG_NURSERY_MOVED)
+ {
+ P = (gcptr)P->h_revision;
+ assert(P->h_tid & GCFLAG_PUBLIC);
+ }
+ if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)
+ {
+ struct tx_descriptor *d = thread_descriptor;
+ wlog_t *item;
+ G2L_FIND(d->public_to_private, P, item, goto no_private_obj);
+
+ P = item->val;
+ assert(!(P->h_tid & GCFLAG_PUBLIC));
+ no_private_obj:
+ ;
+ }
+ }
+ assert(!(P->h_tid & GCFLAG_STUB));
+ return P;
+}
+
static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj,
int from_stolen)
{
@@ -422,29 +452,6 @@
goto restart_all;
}
-#if 0
-void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset)
-{
- return _direct_read_barrier((gcptr)G1, (gcptr)R_Container1, offset);
-}
-#endif
-
-gcptr stm_RepeatReadBarrier(gcptr O)
-{
- abort();//XXX
-#if 0
- // LatestGlobalRevision(O) would either return O or abort
- // the whole transaction, so omitting it is not wrong
- struct tx_descriptor *d = thread_descriptor;
- gcptr L;
- wlog_t *entry;
- G2L_FIND(d->global_to_local, O, entry, return O);
- L = entry->val;
- assert(L->h_revision == stm_local_revision);
- return L;
-#endif
-}
-
static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P)
{
gcptr B;
@@ -749,10 +756,10 @@
smp_spinloop();
}
-#if 0
-size_t _stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
- int abort_reason, char *output);
-#endif
+void stm_abort_and_retry(void)
+{
+ AbortTransaction(ABRT_MANUAL);
+}
void AbortPrivateFromProtected(struct tx_descriptor *d);
@@ -795,41 +802,24 @@
elapsed_time = 1;
}
-#if 0
- size_t size;
if (elapsed_time >= d->longest_abort_info_time)
{
/* decode the 'abortinfo' and produce a human-readable summary in
the string 'longest_abort_info' */
- size = _stm_decode_abort_info(d, elapsed_time, num, NULL);
+ size_t size = stm_decode_abort_info(d, elapsed_time, num, NULL);
free(d->longest_abort_info);
d->longest_abort_info = malloc(size);
if (d->longest_abort_info == NULL)
d->longest_abort_info_time = 0; /* out of memory! */
else
{
- if (_stm_decode_abort_info(d, elapsed_time,
+ if (stm_decode_abort_info(d, elapsed_time,
num, d->longest_abort_info) != size)
stm_fatalerror("during stm abort: object mutated unexpectedly\n");
d->longest_abort_info_time = elapsed_time;
}
}
-#endif
-
-#if 0
- /* run the undo log in reverse order, cancelling the values set by
- stm_ThreadLocalRef_LLSet(). */
- if (d->undolog.size > 0) {
- gcptr *item = d->undolog.items;
- long i;
- for (i=d->undolog.size; i>=0; i-=2) {
- void **addr = (void **)(item[i-2]);
- void *oldvalue = (void *)(item[i-1]);
- *addr = oldvalue;
- }
- }
-#endif
/* upon abort, set the reads size limit to 94% of how much was read
so far. This should ensure that, assuming the retry does the same
@@ -936,10 +926,7 @@
d->count_reads = 1;
fxcache_clear(&d->recent_reads_cache);
-#if 0
- gcptrlist_clear(&d->undolog);
gcptrlist_clear(&d->abortinfo);
-#endif
}
void BeginTransaction(jmp_buf* buf)
@@ -1496,17 +1483,6 @@
/************************************************************/
-#if 0
-void stm_ThreadLocalRef_LLSet(void **addr, void *newvalue)
-{
- struct tx_descriptor *d = thread_descriptor;
- gcptrlist_insert2(&d->undolog, (gcptr)addr, (gcptr)*addr);
- *addr = newvalue;
-}
-#endif
-
-/************************************************************/
-
struct tx_descriptor *stm_tx_head = NULL;
struct tx_public_descriptor *stm_descriptor_array[MAX_THREADS] = {0};
static revision_t descriptor_array_free_list = 0;
@@ -1635,11 +1611,8 @@
assert(d->private_from_protected.size == 0);
gcptrlist_delete(&d->private_from_protected);
gcptrlist_delete(&d->list_of_read_objects);
-#if 0
gcptrlist_delete(&d->abortinfo);
free(d->longest_abort_info);
- gcptrlist_delete(&d->undolog);
-#endif
int num_aborts = 0, num_spinloops = 0;
char line[256], *p = line;
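
For context, the intended use of the new stm_RepeatReadBarrier() added above is
roughly the following; the caller function is invented for illustration, and only
stm_read_barrier(), stm_write_barrier() and stm_RepeatReadBarrier() are real
entry points:

    /* sketch only, not part of this changeset */
    void example_repeat_read(gcptr obj)
    {
        gcptr p = stm_read_barrier(obj);   /* full read barrier: may abort */

        /* ... the same transaction may run stm_write_barrier() calls ... */

        /* cheap re-validation of the earlier result: never aborts, only
           follows NURSERY_MOVED and public-to-private redirections */
        p = stm_RepeatReadBarrier(p);
        /* 'p' again points to the readable, up-to-date copy */
    }
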
diff --git a/c4/et.h b/c4/et.h
--- a/c4/et.h
+++ b/c4/et.h
@@ -152,9 +152,9 @@
unsigned int num_aborts[ABORT_REASONS];
unsigned int num_spinloops[SPINLOOP_REASONS];
struct GcPtrList list_of_read_objects;
- //struct GcPtrList abortinfo;
struct GcPtrList private_from_protected;
struct G2L public_to_private;
+ struct GcPtrList abortinfo;
char *longest_abort_info;
long long longest_abort_info_time;
revision_t *private_revision_ref;
diff --git a/c4/extra.c b/c4/extra.c
new file mode 100644
--- /dev/null
+++ b/c4/extra.c
@@ -0,0 +1,263 @@
+#include "stmimpl.h"
+
+
+void stm_copy_to_old_id_copy(gcptr obj, gcptr id)
+{
+ //assert(!is_in_nursery(thread_descriptor, id));
+ assert(id->h_tid & GCFLAG_OLD);
+
+ size_t size = stmgc_size(obj);
+ memcpy(id, obj, size);
+ id->h_tid &= ~GCFLAG_HAS_ID;
+ id->h_tid |= GCFLAG_OLD;
+ dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id));
+}
+
+/************************************************************/
+/* Each object has a h_original pointer to an old copy of
+ the same object (e.g. an old revision), the "original".
+ The memory location of this old object is used as the ID
+ for this object. If h_original is NULL *and* it is an
+ old object copy, it itself is the original. This invariant
+ must be upheld by all code dealing with h_original.
+ The original copy must never be moved again. Also, it may
+ be just a stub-object.
+
+ If we want the ID of an object which is still young,
+ we must preallocate an old shadow-original that is used
+ as the target of the young object in a minor collection.
+ In this case, we set the HAS_ID flag on the young obj
+ to notify minor_collect.
+ This flag can be lost if the young obj is stolen. Then
+ the stealing thread uses the shadow-original itself and
+ minor_collect must not overwrite it again.
+ Also, if there is already a backup-copy around, we use
+ this instead of allocating another old object to use as
+ the shadow-original.
+ */
+
+static revision_t mangle_hash(revision_t n)
+{
+ /* To hash pointers in dictionaries. Assumes that 'n' shows some
+ alignment (to 4, 8, maybe 16 bytes), so we use the following
+ formula to avoid the trailing bits being always 0.
+ This formula is reversible: two different values of 'n' will
+ always give two different results.
+ */
+ return n ^ (((urevision_t)n) >> 4);
+}
+
+
+revision_t stm_hash(gcptr p)
+{
+ /* Prebuilt objects may have a specific hash stored in an extra
+ field. For now, we will simply always follow h_original and
+ see if it is a prebuilt object (XXX: maybe propagate a flag
+ to all copies of a prebuilt to avoid this cache miss).
+ */
+ if (p->h_original) {
+ if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
+ return p->h_original;
+ }
+ gcptr orig = (gcptr)p->h_original;
+ if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) {
+ return orig->h_original;
+ }
+ }
+ return mangle_hash(stm_id(p));
+}
+
+
+revision_t stm_id(gcptr p)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ revision_t result;
+
+ if (p->h_original) { /* fast path */
+ if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
+ /* h_original may contain a specific hash value,
+ but in case of the prebuilt original version,
+ its memory location is the id */
+ return (revision_t)p;
+ }
+
+ dprintf(("stm_id(%p) has orig fst: %p\n",
+ p, (gcptr)p->h_original));
+ return p->h_original;
+ }
+ else if (p->h_tid & GCFLAG_OLD) {
+ /* old objects must have an h_original xOR be
+ the original itself. */
+ dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p));
+ return (revision_t)p;
+ }
+
+ spinlock_acquire(d->public_descriptor->collection_lock, 'I');
+ /* old objects must have an h_original xOR be
+ the original itself.
+ if some thread stole p when it was still young,
+ it must have set h_original. stealing an old obj
+ makes the old obj "original".
+ */
+ if (p->h_original) { /* maybe now? */
+ result = p->h_original;
+ dprintf(("stm_id(%p) has orig: %p\n",
+ p, (gcptr)p->h_original));
+ }
+ else {
+ /* must create shadow original object XXX: or use
+ backup, if exists */
+
+ /* XXX use stmgcpage_malloc() directly, we don't need to copy
+ * the contents yet */
+ gcptr O = stmgc_duplicate_old(p);
+ p->h_original = (revision_t)O;
+ p->h_tid |= GCFLAG_HAS_ID;
+
+ if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
+ gcptr B = (gcptr)p->h_revision;
+ B->h_original = (revision_t)O;
+ }
+
+ result = (revision_t)O;
+ dprintf(("stm_id(%p) young, make shadow %p\n", p, O));
+ }
+
+ spinlock_release(d->public_descriptor->collection_lock);
+ return result;
+}
+
+_Bool stm_pointer_equal(gcptr p1, gcptr p2)
+{
+ /* fast path for two equal pointers */
+ if (p1 == p2)
+ return 1;
+ /* if p1 or p2 is NULL (but not both, because they are different
+ pointers), then return 0 */
+ if (p1 == NULL || p2 == NULL)
+ return 0;
+ /* types must be the same */
+ if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK))
+ return 0;
+ return stm_id(p1) == stm_id(p2);
+}
+
+/************************************************************/
+
+void stm_abort_info_push(gcptr obj, long fieldoffsets[])
+{
+ struct tx_descriptor *d = thread_descriptor;
+ obj = stm_read_barrier(obj);
+ gcptrlist_insert2(&d->abortinfo, obj, (gcptr)fieldoffsets);
+}
+
+void stm_abort_info_pop(long count)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ long newsize = d->abortinfo.size - 2 * count;
+ gcptrlist_reduce_size(&d->abortinfo, newsize < 0 ? 0 : newsize);
+}
+
+size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
+ int abort_reason, char *output)
+{
+ /* re-encodes the abort info as a single string.
+ For convenience (no escaping needed, no limit on integer
+ sizes, etc.) we follow the bittorrent format. */
+ size_t totalsize = 0;
+ long i;
+ char buffer[32];
+ size_t res_size;
+#define WRITE(c) { totalsize++; if (output) *output++=(c); }
+#define WRITE_BUF(p, sz) { totalsize += (sz); \
+ if (output) { \
+ memcpy(output, (p), (sz)); output += (sz); \
+ } \
+ }
+ WRITE('l');
+ WRITE('l');
+ res_size = sprintf(buffer, "i%llde", (long long)elapsed_time);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%de", (int)abort_reason);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lde", (long)d->public_descriptor_index);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lde", (long)d->atomic);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%de", (int)d->active);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lue", (unsigned long)d->count_reads);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lue",
+ (unsigned long)d->reads_size_limit_nonatomic);
+ WRITE_BUF(buffer, res_size);
+ WRITE('e');
+ for (i=0; i<d->abortinfo.size; i+=2) {
+ char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]);
+ long *fieldoffsets = (long*)d->abortinfo.items[i+1];
+ long kind, offset;
+ size_t rps_size;
+ char *rps;
+
+ while (1) {
+ kind = *fieldoffsets++;
+ if (kind <= 0) {
+ if (kind == -2) {
+ WRITE('l'); /* '[', start of sublist */
+ continue;
+ }
+ if (kind == -1) {
+ WRITE('e'); /* ']', end of sublist */
+ continue;
+ }
+ break; /* 0, terminator */
+ }
+ offset = *fieldoffsets++;
+ switch(kind) {
+ case 1: /* signed */
+ res_size = sprintf(buffer, "i%lde",
+ *(long*)(object + offset));
+ WRITE_BUF(buffer, res_size);
+ break;
+ case 2: /* unsigned */
+ res_size = sprintf(buffer, "i%lue",
+ *(unsigned long*)(object + offset));
+ WRITE_BUF(buffer, res_size);
+ break;
+ case 3: /* a string of bytes from the target object */
+ rps = *(char **)(object + offset);
+ offset = *fieldoffsets++;
+ if (rps) {
+ /* xxx a bit ad-hoc: it's a string whose length is a
+ * long at 'offset', following immediately the offset */
+ rps_size = *(long *)(rps + offset);
+ offset += sizeof(long);
+ assert(rps_size >= 0);
+ res_size = sprintf(buffer, "%zu:", rps_size);
+ WRITE_BUF(buffer, res_size);
+ WRITE_BUF(rps + offset, rps_size);
+ }
+ else {
+ WRITE_BUF("0:", 2);
+ }
+ break;
+ default:
+ stm_fatalerror("corrupted abort log\n");
+ }
+ }
+ }
+ WRITE('e');
+ WRITE('\0'); /* final null character */
+#undef WRITE
+#undef WRITE_BUF
+ return totalsize;
+}
+
+char *stm_inspect_abort_info(void)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ if (d->longest_abort_info_time <= 0)
+ return NULL;
+ d->longest_abort_info_time = 0;
+ return d->longest_abort_info;
+}
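
The 'fieldoffsets' array that stm_abort_info_push() records is a flat descriptor
interpreted by stm_decode_abort_info() above: -2 opens a sublist, -1 closes it,
0 terminates the descriptor, and kinds 1 (signed long), 2 (unsigned long) and
3 (string, followed by the offset of its length field) each take an offset
measured from the start of the GC object, header included. A hypothetical
descriptor, with invented field offsets and HDR standing in for the object
header size, could look like this:

    /* sketch only, not part of this changeset */
    static long my_abort_fields[] = {
        -2,                    /* open a sublist ('l' in the output)     */
        1, HDR,                /* kind 1: signed long at offset HDR      */
        2, HDR + sizeof(long), /* kind 2: unsigned long in the next word */
        -1,                    /* close the sublist ('e')                */
        0                      /* terminator                             */
    };

    /* while the transaction runs:
           stm_abort_info_push(obj, my_abort_fields);
       after an abort, on the retry:
           char *info = stm_inspect_abort_info();   (NULL if none recorded)
    */
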
diff --git a/c4/extra.h b/c4/extra.h
new file mode 100644
--- /dev/null
+++ b/c4/extra.h
@@ -0,0 +1,9 @@
+#ifndef _SRCSTM_EXTRA_H
+#define _SRCSTM_EXTRA_H
+
+
+void stm_copy_to_old_id_copy(gcptr obj, gcptr id);
+size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
+ int abort_reason, char *output);
+
+#endif
diff --git a/c4/gcpage.c b/c4/gcpage.c
--- a/c4/gcpage.c
+++ b/c4/gcpage.c
@@ -225,7 +225,8 @@
id_copy->h_tid |= GCFLAG_VISITED;
/* XXX: may not always need tracing? */
- gcptrlist_insert(&objects_to_trace, id_copy);
+ //if (!(id_copy->h_tid & GCFLAG_STUB))
+ // gcptrlist_insert(&objects_to_trace, id_copy);
}
else {
/* prebuilt originals won't get collected anyway
diff --git a/c4/nursery.c b/c4/nursery.c
--- a/c4/nursery.c
+++ b/c4/nursery.c
@@ -125,131 +125,6 @@
}
/************************************************************/
-/* Each object has a h_original pointer to an old copy of
- the same object (e.g. an old revision), the "original".
- The memory location of this old object is used as the ID
- for this object. If h_original is NULL *and* it is an
- old object copy, it itself is the original. This invariant
- must be upheld by all code dealing with h_original.
- The original copy must never be moved again. Also, it may
- be just a stub-object.
-
- If we want the ID of an object which is still young,
- we must preallocate an old shadow-original that is used
- as the target of the young object in a minor collection.
- In this case, we set the HAS_ID flag on the young obj
- to notify minor_collect.
- This flag can be lost if the young obj is stolen. Then
- the stealing thread uses the shadow-original itself and
- minor_collect must not overwrite it again.
- Also, if there is already a backup-copy around, we use
- this instead of allocating another old object to use as
- the shadow-original.
- */
-
-static revision_t mangle_hash(revision_t n)
-{
- /* To hash pointers in dictionaries. Assumes that i shows some
- alignment (to 4, 8, maybe 16 bytes), so we use the following
- formula to avoid the trailing bits being always 0.
- This formula is reversible: two different values of 'i' will
- always give two different results.
- */
- return n ^ (((urevision_t)n) >> 4);
-}
-
-
-revision_t stm_hash(gcptr p)
-{
- /* Prebuilt objects may have a specific hash stored in an extra
- field. For now, we will simply always follow h_original and
- see, if it is a prebuilt object (XXX: maybe propagate a flag
- to all copies of a prebuilt to avoid this cache miss).
- */
- if (p->h_original) {
- if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
- return p->h_original;
- }
- gcptr orig = (gcptr)p->h_original;
- if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) {
- return orig->h_original;
- }
- }
- return mangle_hash(stm_id(p));
-}
-
-
-revision_t stm_id(gcptr p)
-{
- struct tx_descriptor *d = thread_descriptor;
- revision_t result;
-
- if (p->h_original) { /* fast path */
- if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
- /* h_original may contain a specific hash value,
- but in case of the prebuilt original version,
- its memory location is the id */
- return (revision_t)p;
- }
-
- dprintf(("stm_id(%p) has orig fst: %p\n",
- p, (gcptr)p->h_original));
- return p->h_original;
- }
- else if (p->h_tid & GCFLAG_OLD) {
- /* old objects must have an h_original xOR be
- the original itself. */
- dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p));
- return (revision_t)p;
- }
-
- spinlock_acquire(d->public_descriptor->collection_lock, 'I');
- /* old objects must have an h_original xOR be
- the original itself.
- if some thread stole p when it was still young,
- it must have set h_original. stealing an old obj
- makes the old obj "original".
- */
- if (p->h_original) { /* maybe now? */
- result = p->h_original;
- dprintf(("stm_id(%p) has orig: %p\n",
- p, (gcptr)p->h_original));
- }
- else {
- /* must create shadow original object XXX: or use
- backup, if exists */
-
- /* XXX use stmgcpage_malloc() directly, we don't need to copy
- * the contents yet */
- gcptr O = stmgc_duplicate_old(p);
- p->h_original = (revision_t)O;
- p->h_tid |= GCFLAG_HAS_ID;
-
- if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
- gcptr B = (gcptr)p->h_revision;
- B->h_original = (revision_t)O;
- }
-
- result = (revision_t)O;
- dprintf(("stm_id(%p) young, make shadow %p\n", p, O));
- }
-
- spinlock_release(d->public_descriptor->collection_lock);
- return result;
-}
-
-_Bool stm_pointer_equal(gcptr p1, gcptr p2)
-{
- /* fast path for two equal pointers */
- if (p1 == p2)
- return 1;
- /* types must be the same */
- if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK))
- return 0;
- return stm_id(p1) == stm_id(p2);
-}
-
-/************************************************************/
static inline gcptr create_old_object_copy(gcptr obj)
{
@@ -266,18 +141,6 @@
return fresh_old_copy;
}
-void copy_to_old_id_copy(gcptr obj, gcptr id)
-{
- assert(!is_in_nursery(thread_descriptor, id));
- assert(id->h_tid & GCFLAG_OLD);
-
- size_t size = stmgc_size(obj);
- memcpy(id, obj, size);
- id->h_tid &= ~GCFLAG_HAS_ID;
- id->h_tid |= GCFLAG_OLD;
- dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id));
-}
-
static void visit_if_young(gcptr *root)
{
gcptr obj = *root;
@@ -303,7 +166,7 @@
/* already has a place to go to */
gcptr id_obj = (gcptr)obj->h_original;
- copy_to_old_id_copy(obj, id_obj);
+ stm_copy_to_old_id_copy(obj, id_obj);
fresh_old_copy = id_obj;
obj->h_tid &= ~GCFLAG_HAS_ID;
}
@@ -485,7 +348,7 @@
we may occasionally see a PUBLIC object --- one that was
a private/protected object when it was added to
old_objects_to_trace, and has been stolen. So we have to
- check and not do any change the obj->h_tid in that case.
+ check and not do any change to the obj->h_tid in that case.
Otherwise this conflicts with the rule that we may only
modify obj->h_tid of a public object in order to add
PUBLIC_TO_PRIVATE.
diff --git a/c4/steal.c b/c4/steal.c
--- a/c4/steal.c
+++ b/c4/steal.c
@@ -1,8 +1,6 @@
#include "stmimpl.h"
-void copy_to_old_id_copy(gcptr obj, gcptr id);
-
gcptr stm_stub_malloc(struct tx_public_descriptor *pd)
{
assert(pd->collection_lock != 0);
@@ -167,7 +165,7 @@
/* use id-copy for us */
O = (gcptr)L->h_original;
L->h_tid &= ~GCFLAG_HAS_ID;
- copy_to_old_id_copy(L, O);
+ stm_copy_to_old_id_copy(L, O);
O->h_original = 0;
} else {
/* Copy the object out of the other thread's nursery,
diff --git a/c4/stmgc.c b/c4/stmgc.c
--- a/c4/stmgc.c
+++ b/c4/stmgc.c
@@ -9,5 +9,6 @@
#include "nursery.c"
#include "gcpage.c"
#include "stmsync.c"
+#include "extra.c"
#include "dbgmem.c"
#include "fprintcolor.c"
diff --git a/c4/stmgc.h b/c4/stmgc.h
--- a/c4/stmgc.h
+++ b/c4/stmgc.h
@@ -101,6 +101,22 @@
It is set to NULL by stm_initialize(). */
extern __thread gcptr stm_thread_local_obj;
+/* For tracking where aborts occur, you can push/pop information
+ into this stack. When an abort occurs, this information is encoded
+ and flattened into a buffer which can later be retrieved with
+ stm_inspect_abort_info(). (XXX details not documented yet) */
+void stm_abort_info_push(gcptr obj, long fieldoffsets[]);
+void stm_abort_info_pop(long count);
+char *stm_inspect_abort_info(void);
+
+/* mostly for debugging support */
+void stm_abort_and_retry(void);
+void stm_minor_collect(void);
+void stm_major_collect(void);
+
+
+/**************** END OF PUBLIC INTERFACE *****************/
+/************************************************************/
/* macro-like functionality */
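
Taken together, the new public entry points are meant to be used in the pattern
exercised by test_extra.py further down: push descriptors while the transaction
runs, let it abort (or force it with stm_abort_and_retry()), and read the
flattened summary on the retry. A rough sketch; the callback shape is only an
assumption mirroring the test helper and is not defined by this changeset:

    /* sketch only, not part of this changeset */
    static long offsets[] = {1, HDR, 0};        /* one signed field at HDR */

    int example_transaction(gcptr obj, int retry_counter)
    {
        if (retry_counter == 0) {
            stm_abort_info_push(obj, offsets);
            stm_abort_and_retry();              /* debugging: abort right away */
        }
        else {
            char *info = stm_inspect_abort_info();  /* NULL if nothing recorded */
            /* 'info' is the bencoded summary of the longest recent abort */
        }
        return 0;
    }
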
diff --git a/c4/stmimpl.h b/c4/stmimpl.h
--- a/c4/stmimpl.h
+++ b/c4/stmimpl.h
@@ -12,7 +12,7 @@
# endif
#endif
-#ifdef _GC_DEBUG
+#if defined(_GC_DEBUG) && !defined(DUMP_EXTRA)
# if _GC_DEBUG >= 2
# define DUMP_EXTRA
# endif
@@ -35,5 +35,6 @@
#include "et.h"
#include "steal.h"
#include "stmsync.h"
+#include "extra.h"
#endif
diff --git a/c4/stmsync.c b/c4/stmsync.c
--- a/c4/stmsync.c
+++ b/c4/stmsync.c
@@ -328,6 +328,18 @@
AbortNowIfDelayed(); /* if another thread ran a major GC */
}
+void stm_minor_collect(void)
+{
+ stmgc_minor_collect();
+ stmgcpage_possibly_major_collect(0);
+}
+
+void stm_major_collect(void)
+{
+ stmgc_minor_collect();
+ stmgcpage_possibly_major_collect(1);
+}
+
/************************************************************/
/***** Prebuilt roots, added in the list as the transaction that changed
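
The two collection helpers above are handy for checking the id/hash invariants
from extra.c: an object's stm_id() is pinned to its (shadow) original and
therefore survives being moved out of the nursery. A small sketch, assuming the
usual shadow-stack protocol with stm_push_root()/stm_pop_root() to keep the
local pointer valid across the collection:

    /* sketch only, not part of this changeset */
    void example_stable_id(gcptr young_obj)
    {
        revision_t i1 = stm_id(young_obj);   /* allocates an old shadow original */

        stm_push_root(young_obj);
        stm_minor_collect();                 /* the object leaves the nursery */
        young_obj = stm_pop_root();

        assert(stm_id(young_obj) == i1);     /* the id did not change */
    }
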
diff --git a/c4/test/support.py b/c4/test/support.py
--- a/c4/test/support.py
+++ b/c4/test/support.py
@@ -11,11 +11,11 @@
header_files = [os.path.join(parent_dir, _n) for _n in
"et.h lists.h steal.h nursery.h gcpage.h "
- "stmsync.h dbgmem.h fprintcolor.h "
+ "stmsync.h extra.h dbgmem.h fprintcolor.h "
"stmgc.h stmimpl.h atomic_ops.h".split()]
source_files = [os.path.join(parent_dir, _n) for _n in
"et.c lists.c steal.c nursery.c gcpage.c "
- "stmsync.c dbgmem.c fprintcolor.c".split()]
+ "stmsync.c extra.c dbgmem.c fprintcolor.c".split()]
_pycache_ = os.path.join(parent_dir, 'test', '__pycache__')
if os.path.exists(_pycache_):
@@ -65,6 +65,10 @@
long stm_atomic(long delta);
int stm_enter_callback_call(void);
void stm_leave_callback_call(int);
+ void stm_abort_info_push(gcptr obj, long fieldoffsets[]);
+ void stm_abort_info_pop(long count);
+ char *stm_inspect_abort_info(void);
+ void stm_abort_and_retry(void);
/* extra non-public code */
void printfcolor(char *msg);
@@ -619,7 +623,7 @@
assert fine == [True]
def abort_and_retry():
- lib.AbortTransaction(lib.ABRT_MANUAL)
+ lib.stm_abort_and_retry()
def classify(p):
private_from_protected = (p.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0
diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py
new file mode 100644
--- /dev/null
+++ b/c4/test/test_extra.py
@@ -0,0 +1,116 @@
+import py, sys, struct
+from support import *
+
+
+def setup_function(f):
+ lib.stm_clear_between_tests()
+ lib.stm_initialize_tests(getattr(f, 'max_aborts', 0))
+
+def teardown_function(_):
+ lib.stm_finalize()
+
+
+def test_abort_info_stack():
+ p = nalloc(HDR)
+ q = nalloc(HDR)
+ lib.stm_abort_info_push(p, ffi.cast("long *", 123))
+ lib.stm_abort_info_push(q, ffi.cast("long *", 125))
+ lib.stm_abort_info_pop(2)
+ # no real test here
+
+def test_inspect_abort_info_signed():
+ fo1 = ffi.new("long[]", [-2, 1, HDR, -1, 0])
+ #
+ @perform_transaction
+ def run(retry_counter):
+ if retry_counter == 0:
+ p = nalloc(HDR + WORD)
+ lib.setlong(p, 0, -421289712)
+ lib.stm_abort_info_push(p, fo1)
+ abort_and_retry()
+ else:
+ c = lib.stm_inspect_abort_info()
+ assert c
+ assert ffi.string(c).endswith("eli-421289712eee")
+
+def test_inspect_abort_info_nested_unsigned():
+ fo1 = ffi.new("long[]", [-2, 2, HDR, 0])
+ fo2 = ffi.new("long[]", [2, HDR + WORD, -1, 0])
+ #
+ @perform_transaction
+ def run(retry_counter):
+ if retry_counter == 0:
+ p = nalloc(HDR + WORD)
+ q = nalloc(HDR + 2 * WORD)
+ lib.setlong(p, 0, sys.maxint)
+ lib.setlong(q, 1, -1)
+ lib.stm_abort_info_push(p, fo1)
+ lib.stm_abort_info_push(q, fo2)
+ abort_and_retry()
+ else:
+ c = lib.stm_inspect_abort_info()
+ assert c
+ assert ffi.string(c).endswith("eli%dei%deee" % (
+ sys.maxint, sys.maxint * 2 + 1))
+
+def test_inspect_abort_info_string():
+ fo1 = ffi.new("long[]", [3, HDR + WORD, HDR, 0])
+ #
+ @perform_transaction
+ def run(retry_counter):
+ if retry_counter == 0:
+ p = nalloc_refs(2)
+ q = nalloc(HDR + 2 * WORD)
+ lib.setptr(p, 1, q)
+ lib.setlong(q, 0, 3)
+ word = "ABC" + "\xFF" * (WORD - 3)
+ lib.setlong(q, 1, struct.unpack("l", word)[0])
+ lib.stm_abort_info_push(p, fo1)
+ abort_and_retry()
+ else:
+ c = lib.stm_inspect_abort_info()
+ assert c
+ assert ffi.string(c).endswith("e3:ABCe")
+
+def test_inspect_null():
+ fo1 = ffi.new("long[]", [3, HDR, HDR + 1, 0])
+ #
+ @perform_transaction
+ def run(retry_counter):
+ if retry_counter == 0:
+ p = nalloc_refs(1)
+ lib.setptr(p, 0, ffi.NULL) # default
+ lib.stm_abort_info_push(p, fo1)
+ abort_and_retry()
+ else:
+ c = lib.stm_inspect_abort_info()
+ assert c
+ assert ffi.string(c).endswith("e0:e")
+
+def test_latest_version():
+ fo1 = ffi.new("long[]", [1, HDR, 0])
+ p = palloc(HDR + WORD)
+ lib.rawsetlong(p, 0, -9827892)
+ #
+ @perform_transaction
+ def run(retry_counter):
+ if retry_counter == 0:
+ lib.stm_abort_info_push(p, fo1)
+ lib.setlong(p, 0, 424242)
+ abort_and_retry()
+ else:
+ c = lib.stm_inspect_abort_info()
+ assert c
+ assert ffi.string(c).endswith("ei424242ee")
+
+def test_pointer_equal():
+ p = palloc(HDR)
+ assert lib.stm_pointer_equal(p, p)
+ assert not lib.stm_pointer_equal(p, ffi.NULL)
+ assert not lib.stm_pointer_equal(ffi.NULL, p)
+ assert lib.stm_pointer_equal(ffi.NULL, ffi.NULL)
+ q = lib.stm_write_barrier(p)
+ assert q != p
+ assert lib.stm_pointer_equal(p, q)
+ assert lib.stm_pointer_equal(q, q)
+ assert lib.stm_pointer_equal(q, p)
diff --git a/duhton/Makefile b/duhton/Makefile
--- a/duhton/Makefile
+++ b/duhton/Makefile
@@ -5,7 +5,7 @@
gcc -pthread -g -O2 -o duhton *.c ../c4/stmgc.c -Wall -lrt
duhton_debug: *.c *.h ../c4/*.c ../c4/*.h
- gcc -pthread -g -DDu_DEBUG -D_GC_DEBUG=2 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt
+ gcc -pthread -g -DDu_DEBUG -D_GC_DEBUGPRINTS=1 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt
clean:
rm -f duhton duhton_debug