Author: Remi Meier <[email protected]>
Branch: copy-over-original2
Changeset: r427:b39faaf63e68
Date: 2013-07-19 16:15 +0200
http://bitbucket.org/pypy/stmgc/changeset/b39faaf63e68/
Log: merge weakrefs
diff --git a/c4/Makefile b/c4/Makefile
--- a/c4/Makefile
+++ b/c4/Makefile
@@ -16,10 +16,10 @@
H_FILES = atomic_ops.h stmgc.h stmimpl.h \
et.h lists.h steal.h nursery.h gcpage.h \
- stmsync.h extra.h dbgmem.h fprintcolor.h
+ stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h
C_FILES = et.c lists.c steal.c nursery.c gcpage.c \
- stmsync.c extra.c dbgmem.c fprintcolor.c
+ stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c
DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1
-D_GC_DEBUGPRINTS=1
diff --git a/c4/demo_random.c b/c4/demo_random.c
--- a/c4/demo_random.c
+++ b/c4/demo_random.c
@@ -25,27 +25,46 @@
// SUPPORT
#define GCTID_STRUCT_NODE 123
+#define GCTID_WEAKREF 122
+
+struct node;
+typedef struct node * nodeptr;
+struct weak_node {
+ struct stm_object_s hdr;
+ nodeptr node;
+};
+typedef struct weak_node * weaknodeptr;
+#define WEAKNODE_SIZE sizeof(struct weak_node)
struct node {
struct stm_object_s hdr;
long value;
revision_t id;
revision_t hash;
- struct node *next;
+ nodeptr next;
+ weaknodeptr weakref;
};
-typedef struct node * nodeptr;
+
+
size_t stmcb_size(gcptr ob)
{
- assert(stm_get_tid(ob) == GCTID_STRUCT_NODE);
- return sizeof(struct node);
+ if (stm_get_tid(ob) == GCTID_STRUCT_NODE)
+ return sizeof(struct node);
+ else if (stm_get_tid(ob) == GCTID_WEAKREF)
+ return WEAKNODE_SIZE;
+ assert(0); return 0; /* unreachable */
}
+
void stmcb_trace(gcptr ob, void visit(gcptr *))
{
nodeptr n;
+ if (stm_get_tid(ob) == GCTID_WEAKREF)
+ return;
assert(stm_get_tid(ob) == GCTID_STRUCT_NODE);
n = (nodeptr)ob;
visit((gcptr *)&n->next);
+ visit((gcptr *)&n->weakref);
}
@@ -53,11 +72,6 @@
time_t default_seed;
gcptr shared_roots[SHARED_ROOTS];
-#define CACHE_MASK 65535
-#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *))
-#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache) \
- + ((revision_t)(obj) & CACHE_MASK)))
-
struct thread_data {
unsigned int thread_seed;
gcptr roots[MAXROOTS];
@@ -67,7 +81,6 @@
int steps_left;
int interruptible;
int atomic;
- revision_t writeable[CACHE_ENTRIES];
};
__thread struct thread_data td;
@@ -105,6 +118,21 @@
return (int)(rand_r(&td.thread_seed) % (unsigned int)max);
}
+gcptr get_random_root()
+{
+ int num = get_rand(td.num_roots + 1);
+ if (num == 0)
+ return stm_thread_local_obj;
+ else
+ return td.roots[num - 1];
+}
+
+gcptr get_random_shared_root()
+{
+ int num = get_rand(SHARED_ROOTS);
+ return shared_roots[num];
+}
+
void copy_roots(gcptr *from, gcptr *to, int num)
{
int i;
@@ -137,7 +165,7 @@
return x;
}
-void push_roots(int with_cache)
+void push_roots()
{
int i;
for (i = 0; i < td.num_roots; i++) {
@@ -145,30 +173,11 @@
if (td.roots[i])
stm_push_root(td.roots[i]);
}
-
- if (with_cache) {
- stm_push_root(NULL);
- for (i = 0; i < CACHE_ENTRIES; i++) {
- if (td.writeable[i])
- stm_push_root((gcptr)td.writeable[i]);
- }
- }
}
-void pop_roots(int with_cache)
+void pop_roots()
{
int i;
- /* some objects may have changed positions */
- memset(td.writeable, 0, sizeof(td.writeable));
-
- if (with_cache) {
- gcptr obj = stm_pop_root();
- while (obj) {
- CACHE_AT(td.writeable, obj) = obj;
- obj = stm_pop_root();
- }
- }
-
for (i = td.num_roots - 1; i >= 0; i--) {
if (td.roots[i])
td.roots[i] = stm_pop_root();
@@ -186,12 +195,33 @@
nodeptr allocate_node()
{
nodeptr r;
- push_roots(1);
+ push_roots();
r = (nodeptr)stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE);
- pop_roots(1);
+ pop_roots();
return r;
}
+
+weaknodeptr allocate_weaknodeptr(nodeptr to)
+{
+ weaknodeptr w;
+ push_roots();
+ w = (weaknodeptr)stm_weakref_allocate(WEAKNODE_SIZE, GCTID_WEAKREF,
+ (gcptr)to);
+ pop_roots();
+ return w;
+}
+
+void set_weakref(nodeptr n, nodeptr to)
+{
+ stm_push_root((gcptr)n);
+ weaknodeptr w = allocate_weaknodeptr(to);
+ n = (nodeptr)stm_pop_root();
+ n = (nodeptr)stm_write_barrier((gcptr)n);
+ n->weakref = w;
+ dprintf(("set_weakref %p -> %p -> %p\n", n, w, to));
+}
+
int is_shared_prebuilt(gcptr p)
{
int i;
@@ -252,7 +282,6 @@
if (p != NULL) {
check(p);
w = stm_write_barrier(p);
- CACHE_AT(td.writeable, w) = w;
check(w);
assert(is_private(w));
}
@@ -369,22 +398,22 @@
{
int k = get_rand(100);
if (k < 10) {
- push_roots(1);
+ push_roots();
stm_push_root(p);
stm_become_inevitable("fun");
p = stm_pop_root();
- pop_roots(1);
+ pop_roots();
}
else if (k < 40) {
- push_roots(1);
+ push_roots();
stmgc_minor_collect();
- pop_roots(1);
+ pop_roots();
p = NULL;
} else if (k < 41 && DO_MAJOR_COLLECTS) {
fprintf(stdout, "major collect\n");
- push_roots(1);
+ push_roots();
stmgcpage_possibly_major_collect(1);
- pop_roots(1);
+ pop_roots();
p = NULL;
}
return p;
@@ -423,10 +452,7 @@
break;
case 7: // set 'p' as *next in one of the roots
check(_r);
- if (CACHE_AT(td.writeable, _r) == _r)
- w_r = (nodeptr)_r;
- else
- w_r = (nodeptr)write_barrier(_r);
+ w_r = (nodeptr)write_barrier(_r);
check((gcptr)w_r);
check(p);
w_r->next = (struct node*)p;
@@ -447,6 +473,46 @@
return p;
}
+gcptr weakref_events(gcptr p, gcptr _r, gcptr _sr)
+{
+ nodeptr t;
+ weaknodeptr w, ww;
+ gcptr ptrs[] = {_r, _sr};
+
+ int i = get_rand(2);
+ int k = get_rand(3);
+ switch (k) {
+ case 0: // check weakref
+ t = (nodeptr)read_barrier(ptrs[i]);
+ w = t->weakref;
+ if(w) {
+ ww = (weaknodeptr)stm_read_barrier((gcptr)w);
+ assert(stm_get_tid((gcptr)ww) == GCTID_WEAKREF);
+ if (ww->node) {
+ check((gcptr)ww->node);
+ }
+ else {
+ t->weakref = NULL;
+ }
+ }
+ p = NULL;
+ break;
+ case 1: // set weakref to something
+ if (p)
+ set_weakref((nodeptr)_r, (nodeptr)p);
+ else
+ set_weakref((nodeptr)_r, (nodeptr)get_random_root());
+ p = NULL;
+ break;
+ case 2: // set weakref on shared roots
+ set_weakref((nodeptr)_sr, (nodeptr)get_random_shared_root());
+ p = NULL;
+ break;
+ }
+ return p;
+}
+
+
gcptr shared_roots_events(gcptr p, gcptr _r, gcptr _sr)
{
nodeptr w_sr;
@@ -461,7 +527,7 @@
break;
case 2:
w_sr = (nodeptr)write_barrier(_sr);
- w_sr->next = (nodeptr)shared_roots[get_rand(SHARED_ROOTS)];
+ w_sr->next = (nodeptr)get_random_shared_root();
break;
}
return p;
@@ -485,10 +551,7 @@
assert(w_t->id == stm_id((gcptr)_t));
}
else {
- if (CACHE_AT(td.writeable, _t) == _t)
- w_t = (nodeptr)_t;
- else
- w_t = (nodeptr)write_barrier(_t);
+ w_t = (nodeptr)write_barrier(_t);
w_t->id = stm_id((gcptr)w_t);
assert(w_t->id == stm_id((gcptr)_t));
}
@@ -504,10 +567,7 @@
assert(w_t->hash == stm_hash((gcptr)_t));
}
else {
- if (CACHE_AT(td.writeable, _t) == _t)
- w_t = (nodeptr)_t;
- else
- w_t = (nodeptr)write_barrier(_t);
+ w_t = (nodeptr)write_barrier(_t);
w_t->hash = stm_hash((gcptr)w_t);
assert(w_t->hash == stm_hash((gcptr)_t));
}
@@ -526,18 +586,12 @@
gcptr do_step(gcptr p)
{
gcptr _r, _sr;
- int num, k;
+ int k;
- num = get_rand(td.num_roots+1);
- if (num == 0)
- _r = stm_thread_local_obj;
- else
- _r = td.roots[num - 1];
-
- num = get_rand(SHARED_ROOTS);
- _sr = shared_roots[num];
+ _r = get_random_root();
+ _sr = get_random_shared_root();
- k = get_rand(9);
+ k = get_rand(11);
check(p);
assert(thread_descriptor->active);
@@ -549,6 +603,8 @@
p = id_hash_events(p, _r, _sr);
else if (k < 8)
p = rare_events(p, _r, _sr);
+ else if (k < 10)
+ p = weakref_events(p, _r, _sr);
else if (get_rand(20) == 1) {
// transaction break
fprintf(stdout, "|");
@@ -563,7 +619,7 @@
void transaction_break()
{
- push_roots(0);
+ push_roots();
td.interruptible = 1;
copy_roots(td.roots, td.roots_outside_perform, td.num_roots);
@@ -575,9 +631,7 @@
copy_roots(td.roots_outside_perform, td.roots, td.num_roots);
td.interruptible = 0;
- pop_roots(0);
-
- /* done by pop_roots() memset(&td.writeable, 0, sizeof(td.writeable)); */
+ pop_roots();
}
@@ -592,8 +646,8 @@
assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF);
arg1 = stm_pop_root();
assert(arg1 == NULL);
- pop_roots(0);
- push_roots(0);
+ pop_roots();
+ push_roots();
stm_push_root(arg1);
stm_push_root(end_marker);
@@ -609,9 +663,6 @@
{
gcptr p = NULL;
- // clear cache of writeables:
- memset(&td.writeable, 0, sizeof(td.writeable));
-
while (td.steps_left-->0 || td.atomic) {
if (td.steps_left % 8 == 0)
fprintf(stdout, "#");
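
The new set_weakref() above also shows the root-handling discipline that demo_random.c follows around every allocation: the holder is pushed on the shadowstack before allocate_weaknodeptr() (which may trigger a minor collection), popped afterwards, and only then run through the write barrier before the store. A minimal annotated sketch of that pattern, reusing the demo's own types (illustration only, not part of the patch):

    void attach_weakref(nodeptr n, nodeptr target)
    {
        stm_push_root((gcptr)n);             /* keep 'n' alive across the GC  */
        weaknodeptr w = (weaknodeptr)stm_weakref_allocate(
                            WEAKNODE_SIZE, GCTID_WEAKREF, (gcptr)target);
        n = (nodeptr)stm_pop_root();         /* 'n' may have moved            */
        n = (nodeptr)stm_write_barrier((gcptr)n);
        n->weakref = w;                      /* store only after the barrier  */
    }

Note that 'target' needs no explicit push here: stm_weakref_allocate() saves and restores its 'obj' argument across the allocation itself.
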
diff --git a/c4/et.c b/c4/et.c
--- a/c4/et.c
+++ b/c4/et.c
@@ -6,6 +6,29 @@
*/
#include "stmimpl.h"
+#ifdef _GC_DEBUG
+char tmp_buf[128];
+char* stm_dbg_get_hdr_str(gcptr obj)
+{
+ char *cur;
+ char *flags[] = GC_FLAG_NAMES;
+ int i;
+
+ i = 0;
+ cur = tmp_buf;
+ cur += sprintf(cur, "%p:", obj);
+ while (flags[i]) {
+ if (obj->h_tid & (STM_FIRST_GCFLAG << i)) {
+ cur += sprintf(cur, "%s|", flags[i]);
+ }
+ i++;
+ }
+ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj));
+ return tmp_buf;
+}
+#endif
+
+
__thread struct tx_descriptor *thread_descriptor = NULL;
@@ -545,6 +568,7 @@
gcptr stm_WriteBarrier(gcptr P)
{
+ assert(!(P->h_tid & GCFLAG_IMMUTABLE));
if (is_private(P))
{
/* If we have GCFLAG_WRITE_BARRIER in P, then list it into
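
The new stm_dbg_get_hdr_str() helper formats an object's header flags and tid into a single static buffer, so it is only available under _GC_DEBUG and only one call per print statement is meaningful. A small usage sketch (illustration only; dprintf() is the existing debug-print macro):

    #ifdef _GC_DEBUG
    static void debug_dump(gcptr obj)
    {
        /* tmp_buf is shared, so use at most one call per dprintf() */
        dprintf(("object %s\n", stm_dbg_get_hdr_str(obj)));
    }
    #endif
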
diff --git a/c4/et.h b/c4/et.h
--- a/c4/et.h
+++ b/c4/et.h
@@ -72,6 +72,8 @@
static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8;
static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9;
static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10;
+static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11;
+
/* this value must be reflected in PREBUILT_FLAGS in stmgc.h */
#define GCFLAG_PREBUILT (GCFLAG_VISITED | \
@@ -89,6 +91,8 @@
"BACKUP_COPY", \
"STUB", \
"PRIVATE_FROM_PROTECTED", \
+ "HAS_ID", \
+ "IMMUTABLE", \
NULL }
#define IS_POINTER(v) (!((v) & 1)) /* even-valued number */
@@ -196,4 +200,7 @@
void DescriptorInit(void);
void DescriptorDone(void);
+#ifdef _GC_DEBUG
+char* stm_dbg_get_hdr_str(gcptr obj);
+#endif
#endif /* _ET_H */
diff --git a/c4/extra.c b/c4/extra.c
--- a/c4/extra.c
+++ b/c4/extra.c
@@ -3,7 +3,7 @@
void stm_copy_to_old_id_copy(gcptr obj, gcptr id)
{
- //assert(!is_in_nursery(thread_descriptor, id));
+ //assert(!stmgc_is_in_nursery(thread_descriptor, id));
assert(id->h_tid & GCFLAG_OLD);
size_t size = stmgc_size(obj);
diff --git a/c4/gcpage.c b/c4/gcpage.c
--- a/c4/gcpage.c
+++ b/c4/gcpage.c
@@ -222,12 +222,14 @@
if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) {
id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE;
/* see fix_outdated() */
- id_copy->h_tid |= GCFLAG_VISITED;
- assert(!(id_copy->h_tid & GCFLAG_MOVED));
+ if (!(id_copy->h_tid & GCFLAG_VISITED)) {
+ id_copy->h_tid |= GCFLAG_VISITED;
+ assert(!(id_copy->h_tid & GCFLAG_MOVED));
- /* XXX: may not always need tracing? */
- if (!(id_copy->h_tid & GCFLAG_STUB))
- gcptrlist_insert(&objects_to_trace, id_copy);
+ /* XXX: may not always need tracing? */
+ if (!(id_copy->h_tid & GCFLAG_STUB))
+ gcptrlist_insert(&objects_to_trace, id_copy);
+ }
}
else {
/* prebuilt originals won't get collected anyway
@@ -237,6 +239,14 @@
}
}
+static void visit(gcptr *pobj);
+
+gcptr stmgcpage_visit(gcptr obj)
+{
+ visit(&obj);
+ return obj;
+}
+
static gcptr copy_over_original(gcptr obj)
{
assert(!(obj->h_tid & GCFLAG_VISITED));
@@ -346,10 +356,10 @@
keep_original_alive(prev_obj);
assert(*pobj == prev_obj);
- gcptr obj1 = obj;
- visit(&obj1); /* recursion, but should be only once */
+ /* recursion, but should be only once */
+ obj = stmgcpage_visit(obj);
assert(prev_obj->h_tid & GCFLAG_STUB);
- prev_obj->h_revision = ((revision_t)obj1) | 2;
+ prev_obj->h_revision = ((revision_t)obj) | 2;
return;
}
}
@@ -481,8 +491,6 @@
static void mark_all_stack_roots(void)
{
- int i;
- gcptr *items;
struct tx_descriptor *d;
struct G2L new_public_to_private;
memset(&new_public_to_private, 0, sizeof(struct G2L));
@@ -493,15 +501,6 @@
/* the roots pushed on the shadowstack */
mark_roots(d->shadowstack, *d->shadowstack_end_ref);
- /* some roots (^^^) can also be in this list, and
- we may have a stolen priv_from_prot in here that,
- when visited, resolves to its backup (or further) */
- items = d->old_objects_to_trace.items;
- for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) {
- visit(&items[i]);
- gcptrlist_insert(&objects_to_trace, items[i]);
- }
-
/* the thread-local object */
visit(d->thread_local_obj_ref);
visit(&d->old_thread_local_obj);
@@ -570,11 +569,11 @@
assert(gcptrlist_size(&d->public_with_young_copy) == 0);
assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0);
assert(gcptrlist_size(&d->public_descriptor->stolen_young_stubs) == 0);
+ assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
/* NOT NECESSARILY EMPTY:
- list_of_read_objects
- private_from_protected
- public_to_private
- - old_objects_to_trace
*/
assert(gcptrlist_size(&d->list_of_read_objects) ==
d->num_read_objects_known_old);
@@ -608,23 +607,20 @@
}
}
-
- /* we visit old_objects_to_trace during marking and thus, they
- should be up-to-date */
-#ifdef _GC_DEBUG
- items = d->old_objects_to_trace.items;
- for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) {
- gcptr obj = items[i];
- assert(!(obj->h_tid & GCFLAG_MOVED));
- assert(obj->h_tid & GCFLAG_VISITED);
- }
-#endif
+ assert(d->old_objects_to_trace.size == 0);
/* If we're aborting this transaction anyway, we don't need to do
* more here.
*/
- if (d->active < 0)
- return; /* already "aborted" during forced minor collection */
+ if (d->active < 0) {
+ /* already "aborted" during forced minor collection
+ clear list of read objects so that a possible minor collection
+ before the abort doesn't trip
+ fix_list_of_read_objects should not run */
+ gcptrlist_clear(&d->list_of_read_objects);
+ d->num_read_objects_known_old = 0;
+ return;
+ }
if (d->active == 2) {
/* inevitable transaction: clear the list of read objects */
@@ -661,6 +657,9 @@
dprintf(("ABRT_COLLECT_MAJOR %p: "
"%p was read but modified already\n", d, obj));
AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR);
+ /* fix_list_of_read_objects should not run */
+ gcptrlist_clear(&d->list_of_read_objects);
+ d->num_read_objects_known_old = 0;
return;
}
@@ -919,9 +918,13 @@
assert(gcptrlist_size(&objects_to_trace) == 0);
mark_prebuilt_roots();
mark_all_stack_roots();
- visit_all_objects();
+ do {
+ visit_all_objects();
+ stm_visit_old_weakrefs();
+ } while (gcptrlist_size(&objects_to_trace) != 0);
gcptrlist_delete(&objects_to_trace);
clean_up_lists_of_read_objects_and_fix_outdated_flags();
+ stm_clean_old_weakrefs();
mc_total_in_use = mc_total_reserved = 0;
free_all_unused_local_pages();
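
The marking loop added above alternates full tracing with weakref visiting until a fixed point: stm_visit_old_weakrefs() may call stmgcpage_visit() on still-reachable targets, which pushes new entries onto 'objects_to_trace', and those objects can in turn keep further weakref targets alive. Annotated shape of the loop (same code as in the hunk, comments added for illustration):

    do {
        visit_all_objects();        /* drain 'objects_to_trace'             */
        stm_visit_old_weakrefs();   /* may re-fill it via stmgcpage_visit() */
    } while (gcptrlist_size(&objects_to_trace) != 0);

Only once the fixed point is reached does stm_clean_old_weakrefs() run and NULL out the weak pointers whose targets never received GCFLAG_VISITED.
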
diff --git a/c4/gcpage.h b/c4/gcpage.h
--- a/c4/gcpage.h
+++ b/c4/gcpage.h
@@ -45,7 +45,8 @@
/* These fields are in tx_public_descriptor rather than tx_descriptor.
The indirection allows us to keep around the lists of pages even
- after the thread finishes, until the next major collection.
+ after the thread finishes. Such a "zombie" tx_public_descriptor
+ is reused by the next thread that starts.
*/
#define GCPAGE_FIELDS_DECL \
/* The array 'pages_for_size' contains GC_SMALL_REQUESTS \
@@ -65,7 +66,10 @@
/* A set of all non-small objects (outside the nursery). \
We could also have a single global set, but this avoids \
locking in stmgcpage_malloc/free. */ \
- struct G2L nonsmall_objects;
+ struct G2L nonsmall_objects; \
+ \
+ /* Weakref support */ \
+ struct GcPtrList old_weakrefs;
#define LOCAL_GCPAGES() (thread_descriptor->public_descriptor)
@@ -80,6 +84,7 @@
void stmgcpage_add_prebuilt_root(gcptr obj);
void stmgcpage_possibly_major_collect(int force);
long stmgcpage_count(int quantity);
+gcptr stmgcpage_visit(gcptr);
extern struct GcPtrList stm_prebuilt_gcroots;
diff --git a/c4/nursery.c b/c4/nursery.c
--- a/c4/nursery.c
+++ b/c4/nursery.c
@@ -1,7 +1,6 @@
#include "stmimpl.h"
-
-static int is_in_nursery(struct tx_descriptor *d, gcptr obj)
+int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj)
{
return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end);
}
@@ -54,6 +53,7 @@
gcptrlist_delete(&d->old_objects_to_trace);
gcptrlist_delete(&d->public_with_young_copy);
+ gcptrlist_delete(&d->young_weakrefs);
}
void stmgc_minor_collect_soon(void)
@@ -100,6 +100,13 @@
return P;
}
+gcptr stm_allocate_immutable(size_t size, unsigned long tid)
+{
+ gcptr P = stm_allocate(size, tid);
+ P->h_tid |= GCFLAG_IMMUTABLE;
+ return P;
+}
+
gcptr stmgc_duplicate(gcptr P)
{
size_t size = stmgc_size(P);
@@ -125,9 +132,6 @@
}
/************************************************************/
-/* list for private/protected, old roots that need to be
- kept in old_objects_to_trace */
-static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL};
static inline gcptr create_old_object_copy(gcptr obj)
{
@@ -150,7 +154,7 @@
gcptr fresh_old_copy;
struct tx_descriptor *d = thread_descriptor;
- if (!is_in_nursery(d, obj)) {
+ if (!stmgc_is_in_nursery(d, obj)) {
/* not a nursery object */
}
else {
@@ -207,22 +211,6 @@
(revision_t)END_MARKER_ON)) {
/* 'item' is a regular, non-null pointer */
visit_if_young(end);
- item = *end;
- /* if private or protected, this object needs to be
- traced again in the next minor_collect if it is
- currently in old_objects_to_trace. Because then
- it may be seen as write-ready in the view of
- someone:
- pw = write_barrier(); push_root(pw);
- minor_collect(); pw = pop_root(); // pw still write-ready
- */
- if (item
- && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in
- obj_to_trace*/
- && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED
- || item->h_revision == stm_private_rev_num)) {
- gcptrlist_insert(&private_or_protected_roots, item);
- }
}
else if (item != NULL) {
if (item == END_MARKER_OFF)
@@ -377,29 +365,12 @@
stmgc_trace(obj, &visit_if_young);
}
-
- while (gcptrlist_size(&private_or_protected_roots) > 0) {
- gcptr obj = gcptrlist_pop(&private_or_protected_roots);
- /* if it has the write_barrier flag, clear it so that
- it doesn't get inserted twice by a later write-barrier */
- if (obj->h_tid & GCFLAG_WRITE_BARRIER) {
- /* only insert those that were in old_obj_to_trace
- and that we didn't insert already */
- obj->h_tid &= ~GCFLAG_WRITE_BARRIER;
- gcptrlist_insert(&d->old_objects_to_trace, obj);
- dprintf(("re-add %p to old_objects_to_trace\n", obj));
- }
- }
}
static void fix_list_of_read_objects(struct tx_descriptor *d)
{
long i, limit = d->num_read_objects_known_old;
gcptr *items = d->list_of_read_objects.items;
-
- if (d->active < 0)
- return; // aborts anyway
-
assert(d->list_of_read_objects.size >= limit);
if (d->active == 2) {
@@ -410,7 +381,7 @@
for (i = d->list_of_read_objects.size - 1; i >= limit; --i) {
gcptr obj = items[i];
- if (!is_in_nursery(d, obj)) {
+ if (!stmgc_is_in_nursery(d, obj)) {
/* non-young or visited young objects are kept */
continue;
}
@@ -442,8 +413,9 @@
static void teardown_minor_collect(struct tx_descriptor *d)
{
- //assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
+ assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
assert(gcptrlist_size(&d->public_with_young_copy) == 0);
+ assert(gcptrlist_size(&d->young_weakrefs) == 0);
assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0);
spinlock_release(d->public_descriptor->collection_lock);
@@ -479,6 +451,8 @@
surviving young-but-outside-the-nursery objects have been flagged
with GCFLAG_OLD
*/
+ stm_move_young_weakrefs(d);
+
teardown_minor_collect(d);
assert(!stm_has_got_any_lock(d));
@@ -545,9 +519,9 @@
!g2l_any_entry(&d->young_objects_outside_nursery)*/ ) {
/* there is no young object */
assert(gcptrlist_size(&d->public_with_young_copy) == 0);
- assert(IMPLIES(d->active > 0,
- gcptrlist_size(&d->list_of_read_objects) >=
- d->num_read_objects_known_old));
+ assert(gcptrlist_size(&d->young_weakrefs) == 0);
+ assert(gcptrlist_size(&d->list_of_read_objects) >=
+ d->num_read_objects_known_old);
assert(gcptrlist_size(&d->private_from_protected) >=
d->num_private_from_protected_known_old);
d->num_read_objects_known_old =
diff --git a/c4/nursery.h b/c4/nursery.h
--- a/c4/nursery.h
+++ b/c4/nursery.h
@@ -50,7 +50,10 @@
still in the same transaction, to know that the initial \
part of the lists cannot contain young objects any more. */ \
long num_private_from_protected_known_old; \
- long num_read_objects_known_old;
+ long num_read_objects_known_old; \
+ \
+ /* Weakref support */ \
+ struct GcPtrList young_weakrefs;
struct tx_descriptor; /* from et.h */
@@ -64,5 +67,6 @@
size_t stmgc_size(gcptr);
void stmgc_trace(gcptr, void visit(gcptr *));
void stmgc_minor_collect_soon(void);
+int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj);
#endif
diff --git a/c4/steal.c b/c4/steal.c
--- a/c4/steal.c
+++ b/c4/steal.c
@@ -23,9 +23,56 @@
{
gcptr stub, obj = *pobj;
if (obj == NULL || (obj->h_tid & (GCFLAG_PUBLIC | GCFLAG_OLD)) ==
- (GCFLAG_PUBLIC | GCFLAG_OLD))
+ (GCFLAG_PUBLIC | GCFLAG_OLD))
return;
+ if (obj->h_tid & GCFLAG_IMMUTABLE) {
+ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
+ if (obj->h_tid & GCFLAG_PUBLIC) {
+ /* young public, replace with stolen old copy */
+ assert(obj->h_tid & GCFLAG_MOVED);
+ assert(IS_POINTER(obj->h_revision));
+ stub = (gcptr)obj->h_revision;
+ assert(!IS_POINTER(stub->h_revision)); /* not outdated */
+ goto done;
+ }
+
+ /* old or young protected! mark as PUBLIC */
+ if (!(obj->h_tid & GCFLAG_OLD)) {
+ /* young protected */
+ gcptr O;
+
+ if (obj->h_tid & GCFLAG_HAS_ID) {
+ /* use id-copy for us */
+ O = (gcptr)obj->h_original;
+ obj->h_tid &= ~GCFLAG_HAS_ID;
+ stm_copy_to_old_id_copy(obj, O);
+ O->h_original = 0;
+ } else {
+ O = stmgc_duplicate_old(obj);
+
+ /* young and without original? */
+ if (!(obj->h_original))
+ obj->h_original = (revision_t)O;
+ }
+ obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC);
+ obj->h_revision = (revision_t)O;
+
+ O->h_tid |= GCFLAG_PUBLIC;
+ /* here it is fine if it stays in read caches, because
+ the object is immutable anyway and no
+ write_barriers are allowed on it. */
+ dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O));
+ stub = O;
+ goto done;
+ }
+ /* old protected: */
+ dprintf(("prot immutable -> public: %p\n", obj));
+ obj->h_tid |= GCFLAG_PUBLIC;
+
+ return;
+ }
+
/* we use 'all_stubs', a dictionary, in order to try to avoid
duplicate stubs for the same object. XXX maybe it would be
better to use a fast approximative cache that stays around for
diff --git a/c4/stmgc.c b/c4/stmgc.c
--- a/c4/stmgc.c
+++ b/c4/stmgc.c
@@ -10,5 +10,6 @@
#include "gcpage.c"
#include "stmsync.c"
#include "extra.c"
+#include "weakref.c"
#include "dbgmem.c"
#include "fprintcolor.c"
diff --git a/c4/stmgc.h b/c4/stmgc.h
--- a/c4/stmgc.h
+++ b/c4/stmgc.h
@@ -29,6 +29,9 @@
/* allocate an object out of the local nursery */
gcptr stm_allocate(size_t size, unsigned long tid);
+/* allocate an object that is immutable. It cannot be changed with
+ a stm_write_barrier() or after the next commit */
+gcptr stm_allocate_immutable(size_t size, unsigned long tid);
/* returns a never changing hash for the object */
revision_t stm_hash(gcptr);
@@ -54,11 +57,19 @@
int stm_enter_callback_call(void);
void stm_leave_callback_call(int);
-/* read/write barriers (the most general versions only for now) */
-#if 0 // (optimized version below)
-gcptr stm_read_barrier(gcptr);
-gcptr stm_write_barrier(gcptr);
-#endif
+/* read/write barriers (the most general versions only for now).
+
+ - the read barrier must be applied before reading from an object.
+ the result is valid as long as we're in the same transaction,
+ and stm_write_barrier() is not called on the same object.
+
+ - the write barrier must be applied before writing to an object.
+ the result is valid for a shorter period of time: we have to
+ do stm_write_barrier() again if we ended the transaction, or
+ if we did a potential collection (e.g. stm_allocate()).
+*/
+static inline gcptr stm_read_barrier(gcptr);
+static inline gcptr stm_write_barrier(gcptr);
/* start a new transaction, calls callback(), and when it returns
finish that transaction. callback() is called with the 'arg'
@@ -114,6 +125,14 @@
void stm_minor_collect(void);
void stm_major_collect(void);
+/* weakref support: allocate a weakref object, and set it to point
+ weakly to 'obj'. The weak pointer is hard-coded at offset
+ 'size - WORD'. Important: stmcb_trace() must NOT trace it.
+ Weakrefs are *immutable*! Don't attempt to use stm_write_barrier()
+ on them. */
+gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj);
+
+
/**************** END OF PUBLIC INTERFACE *****************/
/************************************************************/
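
From the application side the new entry point is used roughly like this (a sketch under assumptions: MY_WEAKREF_TID is a placeholder tid whose stmcb_size() returns sizeof(struct my_weakref) and whose stmcb_trace() deliberately skips the weak slot, as required by the comment above):

    struct my_weakref {
        struct stm_object_s hdr;
        gcptr target;               /* weak slot: must be the last word */
    };

    gcptr make_weakref_to(gcptr obj)
    {
        return stm_weakref_allocate(sizeof(struct my_weakref),
                                    MY_WEAKREF_TID, obj);
    }

    gcptr deref_weakref(gcptr wr)
    {
        struct my_weakref *w = (struct my_weakref *)stm_read_barrier(wr);
        return w->target;           /* NULL once the referent has died */
    }

Never pass the weakref itself to stm_write_barrier(): it is allocated with GCFLAG_IMMUTABLE and the write barrier now asserts against that flag.
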
diff --git a/c4/stmimpl.h b/c4/stmimpl.h
--- a/c4/stmimpl.h
+++ b/c4/stmimpl.h
@@ -36,5 +36,6 @@
#include "steal.h"
#include "stmsync.h"
#include "extra.h"
+#include "weakref.h"
#endif
diff --git a/c4/test/support.py b/c4/test/support.py
--- a/c4/test/support.py
+++ b/c4/test/support.py
@@ -11,11 +11,11 @@
header_files = [os.path.join(parent_dir, _n) for _n in
"et.h lists.h steal.h nursery.h gcpage.h "
- "stmsync.h extra.h dbgmem.h fprintcolor.h "
+ "stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h "
"stmgc.h stmimpl.h atomic_ops.h".split()]
source_files = [os.path.join(parent_dir, _n) for _n in
"et.c lists.c steal.c nursery.c gcpage.c "
- "stmsync.c extra.c dbgmem.c fprintcolor.c".split()]
+ "stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c".split()]
_pycache_ = os.path.join(parent_dir, 'test', '__pycache__')
if os.path.exists(_pycache_):
@@ -46,7 +46,7 @@
#define PREBUILT_FLAGS ...
#define PREBUILT_REVISION ...
- gcptr stm_allocate(size_t size, unsigned int tid);
+ gcptr stm_allocate(size_t size, unsigned long tid);
revision_t stm_hash(gcptr);
revision_t stm_id(gcptr);
_Bool stm_pointer_equal(gcptr, gcptr);
@@ -69,6 +69,7 @@
void stm_abort_info_pop(long count);
char *stm_inspect_abort_info(void);
void stm_abort_and_retry(void);
+ gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj);
/* extra non-public code */
void printfcolor(char *msg);
@@ -133,6 +134,7 @@
#define GCFLAG_STUB ...
#define GCFLAG_PRIVATE_FROM_PROTECTED ...
#define GCFLAG_HAS_ID ...
+ #define GCFLAG_IMMUTABLE ...
#define ABRT_MANUAL ...
typedef struct { ...; } page_header_t;
''')
@@ -164,14 +166,18 @@
gcptr rawgetptr(gcptr obj, long index)
{
- assert(gettid(obj) > 42142 + index);
+ revision_t t = gettid(obj);
+ if (t == 42142) t++;
+ assert(t > 42142 + index);
return ((gcptr *)(obj + 1))[index];
}
void rawsetptr(gcptr obj, long index, gcptr newvalue)
{
fprintf(stderr, "%p->[%ld] = %p\n", obj, index, newvalue);
- assert(gettid(obj) > 42142 + index);
+ revision_t t = gettid(obj);
+ if (t == 42142) t++;
+ assert(t > 42142 + index);
((gcptr *)(obj + 1))[index] = newvalue;
}
@@ -282,6 +288,8 @@
else {
int nrefs = gettid(obj) - 42142;
assert(nrefs < 100);
+ if (nrefs == 0) /* weakrefs */
+ nrefs = 1;
return sizeof(*obj) + nrefs * sizeof(gcptr);
}
}
@@ -484,7 +492,7 @@
def oalloc_refs(nrefs):
"""Allocate an 'old' protected object, outside any nursery,
with nrefs pointers"""
- size = HDR + WORD * nrefs
+ size = HDR + WORD * (nrefs or 1)
p = lib.stmgcpage_malloc(size)
lib.memset(p, 0, size)
p.h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER
@@ -506,9 +514,9 @@
def nalloc_refs(nrefs):
"Allocate a fresh object from the nursery, with nrefs pointers"
- p = lib.stm_allocate(HDR + WORD * nrefs, 42142 + nrefs)
+ p = lib.stm_allocate(HDR + WORD * (nrefs or 1), 42142 + nrefs)
assert p.h_revision == lib.get_private_rev_num()
- for i in range(nrefs):
+ for i in range(nrefs or 1):
assert rawgetptr(p, i) == ffi.NULL # must already be zero-filled
return p
@@ -524,9 +532,9 @@
def palloc_refs(nrefs, prehash=None):
"Get a ``prebuilt'' object with nrefs pointers."
if prehash is None:
- p = lib.pseudoprebuilt(HDR + WORD * nrefs, 42142 + nrefs)
+ p = lib.pseudoprebuilt(HDR + WORD * (nrefs or 1), 42142 + nrefs)
else:
- p = lib.pseudoprebuilt_with_hash(HDR + WORD * nrefs,
+ p = lib.pseudoprebuilt_with_hash(HDR + WORD * (nrefs or 1),
42142 + nrefs, prehash)
return p
@@ -686,5 +694,8 @@
should_break_transaction = lib.stm_should_break_transaction
-
+WEAKREF_SIZE = HDR + WORD
+WEAKREF_TID = 42142
+
+
nrb_protected = ffi.cast("gcptr", -1)
diff --git a/c4/test/test_et.py b/c4/test/test_et.py
--- a/c4/test/test_et.py
+++ b/c4/test/test_et.py
@@ -205,6 +205,8 @@
assert list_of_read_objects() == [p2]
def test_write_barrier_after_minor_collect():
+ # should fail
+ py.test.skip("should fail now")
p = oalloc_refs(1)
pw = lib.stm_write_barrier(p)
@@ -220,8 +222,10 @@
assert pw.h_tid & GCFLAG_OLD
rawsetptr(pw, 0, r)
- # pw needs to be readded to old_objects_to_trace
- # before the next minor gc in order for this test to pass
+ # pw is not in old_objects_to_trace; repeating
+ # the write_barrier right before rawsetptr()
+ # would fix that
+
lib.stm_push_root(r)
minor_collect()
minor_collect()
@@ -232,6 +236,10 @@
pr = lib.stm_read_barrier(p)
assert r != r2
+ # these will fail because pw/pr were not
+ # traced in the last minor_collect, as they
+ # were not registered in
+ # old_objects_to_trace.
assert getptr(pr, 0) != r
assert getptr(pr, 0) == r2
@@ -251,6 +259,7 @@
assert getptr(pr, 0) != q2
def test_write_barrier_after_minor_collect_young_to_old():
+ py.test.skip("should fail now")
p = nalloc_refs(1)
pw = lib.stm_write_barrier(p)
diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py
--- a/c4/test/test_nursery.py
+++ b/c4/test/test_nursery.py
@@ -200,6 +200,84 @@
check_not_free(p2)
assert classify(p2) == "private"
+def test_old_private_from_protected_to_young_private_2():
+ py.test.skip("not valid")
+ p0 = nalloc_refs(1)
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ lib.setptr(p0, 0, ffi.NULL)
+ assert classify(p0) == "private_from_protected"
+ assert lib.in_nursery(p0) # a young private_from_protected
+ #
+ lib.stm_push_root(p0)
+ minor_collect()
+ p0 = lib.stm_pop_root()
+ assert classify(p0) == "private_from_protected"
+ assert not lib.in_nursery(p0) # becomes an old private_from_protected
+ #
+ # Because it's a private_from_protected, its h_revision is a pointer
+ # to the backup copy, and not stm_private_rev_num. It means that the
+ # write barrier will always enter its slow path, even though the
+ # GCFLAG_WRITE_BARRIER is not set.
+ assert p0.h_revision != lib.get_private_rev_num()
+ assert not (p0.h_tid & GCFLAG_WRITE_BARRIER)
+ #
+ p1 = nalloc(HDR)
+ lib.setptr(p0, 0, p1) # should trigger the write barrier again
+ assert classify(p0) == "private_from_protected"
+ lib.stm_push_root(p0)
+ minor_collect()
+ p0b = lib.stm_pop_root()
+ assert p0b == p0
+ check_nursery_free(p1)
+ assert classify(p0) == "private_from_protected"
+ p2 = lib.getptr(p0, 0)
+ assert not lib.in_nursery(p2)
+ check_not_free(p2)
+ assert classify(p2) == "private"
+
+def test_old_private_from_protected_to_young_private_3():
+ p0 = palloc_refs(1)
+ pw = lib.stm_write_barrier(p0)
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ pr = lib.stm_read_barrier(p0)
+ assert classify(pr) == "protected"
+ assert lib.in_nursery(pr) # a young protected
+ #
+ minor_collect()
+ # each minor collect adds WRITE_BARRIER to protected/private
+ # objects it moves out of the nursery
+ pr = lib.stm_read_barrier(p0)
+ assert pr.h_tid & GCFLAG_WRITE_BARRIER
+ pw = lib.stm_write_barrier(pr)
+ # added to old_obj_to_trace
+ assert not (pw.h_tid & GCFLAG_WRITE_BARRIER)
+
+ lib.setptr(pw, 0, ffi.NULL)
+ assert classify(pw) == "private_from_protected"
+ assert not lib.in_nursery(pw)
+
+ assert pw.h_revision != lib.get_private_rev_num()
+ assert not (pw.h_tid & GCFLAG_WRITE_BARRIER)
+ # #
+
+ lib.stm_push_root(pw)
+ minor_collect()
+ p1 = nalloc(HDR)
+ pw = lib.stm_pop_root()
+ assert pw.h_tid & GCFLAG_WRITE_BARRIER
+ lib.setptr(pw, 0, p1) # should trigger the write barrier again
+ assert classify(pr) == "private_from_protected"
+ minor_collect()
+ check_nursery_free(p1)
+ pr = lib.stm_read_barrier(p0)
+ assert classify(pr) == "private_from_protected"
+ p2 = lib.getptr(pr, 0)
+ assert not lib.in_nursery(p2)
+ check_not_free(p2)
+ assert classify(p2) == "private"
+
def test_new_version():
p1 = oalloc(HDR)
assert lib.stm_write_barrier(p1) == p1
diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py
new file mode 100644
--- /dev/null
+++ b/c4/test/test_weakref.py
@@ -0,0 +1,120 @@
+import py
+from support import *
+
+
+class BaseTest(object):
+ def setup_method(self, meth):
+ lib.stm_clear_between_tests()
+ lib.stm_initialize_tests(0)
+ def teardown_method(self, meth):
+ lib.stm_finalize()
+
+
+class TestMinorCollection(BaseTest):
+
+ def test_weakref_invalidate(self):
+ p2 = nalloc(HDR)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE
+ assert p1.h_revision == lib.get_private_rev_num()
+ assert lib.rawgetptr(p1, 0) == p2
+ lib.stm_push_root(p1)
+ minor_collect()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == ffi.NULL
+
+ def test_weakref_itself_dies(self):
+ p2 = nalloc(HDR)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ minor_collect()
+
+ def test_weakref_keep(self):
+ p2 = nalloc(HDR)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE
+ assert p1.h_revision == lib.get_private_rev_num()
+ assert lib.rawgetptr(p1, 0) == p2
+ lib.stm_push_root(p1)
+ lib.stm_push_root(p2)
+ minor_collect()
+ p2 = lib.stm_pop_root()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2
+
+ def test_weakref_old_keep(self):
+ p2 = oalloc(HDR)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE
+ assert p1.h_revision == lib.get_private_rev_num()
+ assert lib.rawgetptr(p1, 0) == p2
+ lib.stm_push_root(p1)
+ lib.stm_push_root(p2)
+ minor_collect()
+ p2 = lib.stm_pop_root()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2
+
+
+class TestMajorCollection(BaseTest):
+
+ def test_weakref_old(self):
+ p2 = nalloc(HDR)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ #
+ lib.stm_push_root(p1)
+ lib.stm_push_root(p2)
+ major_collect()
+ p2 = lib.stm_pop_root()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2
+ #
+ lib.stm_push_root(p1)
+ major_collect()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == ffi.NULL
+
+ def test_weakref_to_prebuilt(self):
+ p2 = palloc(HDR)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ #
+ lib.stm_push_root(p1)
+ major_collect()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2
+
+ def test_weakref_update_version(self):
+ p2 = oalloc(HDR + WORD); make_public(p2)
+ p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2)
+ #
+ lib.stm_push_root(p1)
+ lib.stm_push_root(p2)
+ major_collect()
+ p2 = lib.stm_pop_root()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2
+ #
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ #
+ lib.setlong(p2, 0, 912809218) # write barrier
+ assert lib.rawgetlong(p2, 0) == 0
+ lib.stm_push_root(p1)
+ lib.stm_push_root(p2)
+ major_collect()
+ p2 = lib.stm_pop_root()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2
+ assert lib.rawgetlong(p2, 0) == 0
+ #
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ #
+ assert lib.rawgetlong(p2, 0) == 0
+ lib.stm_push_root(p1)
+ lib.stm_push_root(p2)
+ major_collect()
+ p2b = lib.stm_pop_root()
+ p1 = lib.stm_pop_root()
+ assert lib.rawgetptr(p1, 0) == p2b
+ assert p2b != p2
+ assert lib.getlong(p2b, 0) == 912809218
diff --git a/c4/weakref.c b/c4/weakref.c
new file mode 100644
--- /dev/null
+++ b/c4/weakref.c
@@ -0,0 +1,210 @@
+#include "stmimpl.h"
+
+#define WEAKREF_PTR(wr, sz) (*(gcptr *)(((char *)(wr)) + (sz) - WORD))
+
+
+gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj)
+{
+ stm_push_root(obj);
+ gcptr weakref = stm_allocate_immutable(size, tid);
+ obj = stm_pop_root();
+ assert(!(weakref->h_tid & GCFLAG_OLD)); /* 'size' too big? */
+ assert(stmgc_size(weakref) == size);
+ WEAKREF_PTR(weakref, size) = obj;
+ gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref);
+ dprintf(("alloc weakref %p -> %p\n", weakref, obj));
+ return weakref;
+}
+
+
+/***** Minor collection *****/
+
+void stm_move_young_weakrefs(struct tx_descriptor *d)
+{
+ /* The code relies on the fact that no weakref can be an old object
+ weakly pointing to a young object. Indeed, weakrefs are immutable
+ so they cannot point to an object that was created after them.
+ */
+ while (gcptrlist_size(&d->young_weakrefs) > 0) {
+ gcptr weakref = gcptrlist_pop(&d->young_weakrefs);
+ if (!(weakref->h_tid & GCFLAG_MOVED))
+ continue; /* the weakref itself dies */
+
+ weakref = (gcptr)weakref->h_revision;
+ size_t size = stmgc_size(weakref);
+ gcptr pointing_to = WEAKREF_PTR(weakref, size);
+ assert(pointing_to != NULL);
+
+ if (stmgc_is_in_nursery(d, pointing_to)) {
+ if (pointing_to->h_tid & GCFLAG_MOVED) {
+ dprintf(("weakref ptr moved %p->%p\n",
+ WEAKREF_PTR(weakref, size),
+ (gcptr)pointing_to->h_revision));
+ WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision;
+ }
+ else {
+ dprintf(("weakref lost ptr %p\n", WEAKREF_PTR(weakref, size)));
+ WEAKREF_PTR(weakref, size) = NULL;
+ continue; /* no need to remember this weakref any longer */
+ }
+ }
+ else {
+ /* # see test_weakref_to_prebuilt: it's not useful to put
+ # weakrefs into 'old_objects_with_weakrefs' if they point
+ # to a prebuilt object (they are immortal). If moreover
+ # the 'pointing_to' prebuilt object still has the
+ # GCFLAG_NO_HEAP_PTRS flag, then it's even wrong, because
+ # 'pointing_to' will not get the GCFLAG_VISITED during
+ # the next major collection. Solve this by not registering
+ # the weakref into 'old_objects_with_weakrefs'.
+ */
+ }
+ gcptrlist_insert(&d->public_descriptor->old_weakrefs, weakref);
+ }
+}
+
+
+/***** Major collection *****/
+
+static _Bool is_partially_visited(gcptr obj)
+{
+ /* Based on gcpage.c:visit(). Check the code here if we simplify
+ visit(). Returns True or False depending on whether we find any
+ version of 'obj' to be VISITED or not.
+ */
+ restart:
+ if (obj->h_tid & GCFLAG_VISITED)
+ return 1;
+
+ if (obj->h_revision & 1) {
+ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
+ assert(!(obj->h_tid & GCFLAG_STUB));
+ return 0;
+ }
+ else if (obj->h_tid & GCFLAG_PUBLIC) {
+ /* h_revision is a ptr: we have a more recent version */
+ if (!(obj->h_revision & 2)) {
+ /* go visit the more recent version */
+ obj = (gcptr)obj->h_revision;
+ }
+ else {
+ /* it's a stub */
+ assert(obj->h_tid & GCFLAG_STUB);
+ obj = (gcptr)(obj->h_revision - 2);
+ }
+ goto restart;
+ }
+ else {
+ assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED);
+ gcptr B = (gcptr)obj->h_revision;
+ assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY));
+ if (B->h_tid & GCFLAG_VISITED)
+ return 1;
+ assert(!(obj->h_tid & GCFLAG_STUB));
+ assert(!(B->h_tid & GCFLAG_STUB));
+
+ if (IS_POINTER(B->h_revision)) {
+ assert(B->h_tid & GCFLAG_PUBLIC);
+ assert(!(B->h_tid & GCFLAG_BACKUP_COPY));
+ assert(!(B->h_revision & 2));
+
+ obj = (gcptr)B->h_revision;
+ goto restart;
+ }
+ }
+ return 0;
+}
+
+static void visit_old_weakrefs(struct tx_public_descriptor *gcp)
+{
+ /* Note: it's possible that a weakref points to a public stub to a
+ protected object, and only the protected object was marked as
+ VISITED so far. In this case, this function needs to mark the
+ public stub as VISITED too.
+ */
+ long i, size = gcp->old_weakrefs.size;
+ gcptr *items = gcp->old_weakrefs.items;
+
+ for (i = 0; i < size; i++) {
+ gcptr weakref = items[i];
+
+ /* weakrefs are immutable: during a major collection, they
+ cannot be in the nursery, and so there should be only one
+ version of each weakref object. XXX relying on this is
+ a bit fragile, but simplifies things a lot... */
+ assert(weakref->h_revision & 1);
+
+ if (!(weakref->h_tid & GCFLAG_VISITED)) {
+ /* the weakref itself dies */
+ }
+ else {
+ size_t size = stmgc_size(weakref);
+ gcptr pointing_to = WEAKREF_PTR(weakref, size);
+ assert(pointing_to != NULL);
+ if (is_partially_visited(pointing_to)) {
+ pointing_to = stmgcpage_visit(pointing_to);
+ dprintf(("mweakref ptr moved %p->%p\n",
+ WEAKREF_PTR(weakref, size),
+ pointing_to));
+
+ assert(pointing_to->h_tid & GCFLAG_VISITED);
+ WEAKREF_PTR(weakref, size) = pointing_to;
+ }
+ else {
+ /* the weakref appears to be pointing to a dying object,
+ but we don't know for sure now. Clearing it is left
+ to clean_old_weakrefs(). */
+ }
+ }
+ }
+}
+
+static void clean_old_weakrefs(struct tx_public_descriptor *gcp)
+{
+ long i, size = gcp->old_weakrefs.size;
+ gcptr *items = gcp->old_weakrefs.items;
+
+ for (i = size - 1; i >= 0; i--) {
+ gcptr weakref = items[i];
+ assert(weakref->h_revision & 1);
+ if (weakref->h_tid & GCFLAG_VISITED) {
+ size_t size = stmgc_size(weakref);
+ gcptr pointing_to = WEAKREF_PTR(weakref, size);
+ if (pointing_to->h_tid & GCFLAG_VISITED) {
+ continue; /* the target stays alive, the weakref remains */
+ }
+ dprintf(("mweakref lost ptr %p\n", WEAKREF_PTR(weakref, size)));
+ WEAKREF_PTR(weakref, size) = NULL; /* the target dies */
+ }
+ /* remove this weakref from the list */
+ items[i] = items[--gcp->old_weakrefs.size];
+ }
+ gcptrlist_compress(&gcp->old_weakrefs);
+}
+
+static void for_each_public_descriptor(
+ void visit(struct tx_public_descriptor *)) {
+ struct tx_descriptor *d;
+ for (d = stm_tx_head; d; d = d->tx_next)
+ visit(d->public_descriptor);
+
+ struct tx_public_descriptor *gcp;
+ revision_t index = -1;
+ while ((gcp = stm_get_free_public_descriptor(&index)) != NULL)
+ visit(gcp);
+}
+
+void stm_visit_old_weakrefs(void)
+{
+ /* Figure out which weakrefs survive, which possibly
+ adds more objects to 'objects_to_trace'.
+ */
+ for_each_public_descriptor(visit_old_weakrefs);
+}
+
+void stm_clean_old_weakrefs(void)
+{
+ /* Clean up the non-surviving weakrefs
+ */
+ for_each_public_descriptor(clean_old_weakrefs);
+}
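
The WEAKREF_PTR(wr, sz) macro at the top of this file encodes the 'weak slot is the last word' convention from stmgc.h. A standalone illustration of the address arithmetic, compilable outside the library (the fake_* types are stand-ins for the real header, and WORD is assumed to equal sizeof(gcptr)):

    #include <assert.h>

    typedef void *gcptr_t;                  /* stand-in for gcptr */
    struct fake_hdr     { long h_tid, h_revision; };
    struct fake_weakref { struct fake_hdr hdr; gcptr_t target; };

    #define WEAKREF_PTR(wr, sz) \
        (*(gcptr_t *)(((char *)(wr)) + (sz) - sizeof(gcptr_t)))

    int main(void)
    {
        struct fake_weakref w = { {0, 0}, 0 };
        /* the macro resolves to exactly the last-word field */
        assert(&WEAKREF_PTR(&w, sizeof(w)) == &w.target);
        return 0;
    }
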
diff --git a/c4/weakref.h b/c4/weakref.h
new file mode 100644
--- /dev/null
+++ b/c4/weakref.h
@@ -0,0 +1,10 @@
+#ifndef _SRCSTM_WEAKREF_H
+#define _SRCSTM_WEAKREF_H
+
+
+void stm_move_young_weakrefs(struct tx_descriptor *);
+void stm_visit_old_weakrefs(void);
+void stm_clean_old_weakrefs(void);
+
+
+#endif
diff --git a/duhton/listobject.c b/duhton/listobject.c
--- a/duhton/listobject.c
+++ b/duhton/listobject.c
@@ -75,7 +75,7 @@
void _list_append(DuListObject *ob, DuObject *x)
{
- _du_write1(ob);
+ _du_read1(ob);
DuTupleObject *olditems = ob->ob_tuple;
_du_read1(olditems);
@@ -85,6 +85,8 @@
DuTupleObject *newitems = DuTuple_New(newcount);
_du_restore3(ob, x, olditems);
+ _du_write1(ob);
+
for (i=0; i<newcount-1; i++)
newitems->ob_items[i] = olditems->ob_items[i];
newitems->ob_items[newcount-1] = x;
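
The duhton change follows the barrier rule now spelled out in stmgc.h: DuTuple_New() can allocate and therefore collect, so the result of an earlier _du_write1(ob) could no longer be trusted; the list is only read before the allocation, and the write barrier is re-applied afterwards, just before the stores. The general shape of the pattern in plain stmgc calls (a sketch, not duhton code):

    void store_fresh(gcptr ob, size_t size, unsigned long tid)
    {
        ob = stm_read_barrier(ob);              /* enough for reading old state */
        stm_push_root(ob);
        gcptr fresh = stm_allocate(size, tid);  /* may collect and move 'ob'    */
        ob = stm_pop_root();
        ob = stm_write_barrier(ob);             /* barrier redone after alloc   */
        /* ... it is now safe to store 'fresh' into a field of 'ob' ... */
        (void)fresh;
    }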