Author: Remi Meier <[email protected]>
Branch: finalizer-queues
Changeset: r1999:a59a745d2093
Date: 2016-11-16 16:48 +0100
http://bitbucket.org/pypy/stmgc/changeset/a59a745d2093/
Log: WIP: new finalizer API
oldstyle-finalizers should still work, but the new finalizer queues
are untested as of this commit.
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -1048,7 +1048,6 @@
assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0]));
assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
assert(list_is_empty(STM_PSEGMENT->young_objects_with_destructors));
- assert(STM_PSEGMENT->finalizers == NULL);
assert(STM_PSEGMENT->active_queues == NULL);
#ifndef NDEBUG
/* this should not be used when objects_pointing_to_nursery == NULL */
@@ -1207,8 +1206,6 @@
static void _core_commit_transaction(bool external)
{
- exec_local_finalizers();
-
assert(!_has_mutex());
assert(STM_PSEGMENT->safe_point == SP_RUNNING);
assert(STM_PSEGMENT->transaction_state != TS_NONE);
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -146,7 +146,7 @@
pthread_t running_pthread;
#endif
- /* light finalizers */
+ /* destructors */
struct list_s *young_objects_with_destructors;
struct list_s *old_objects_with_destructors;
diff --git a/c8/stm/finalizer.c b/c8/stm/finalizer.c
--- a/c8/stm/finalizer.c
+++ b/c8/stm/finalizer.c
@@ -2,6 +2,10 @@
# error "must be compiled via stmgc.c"
# include "core.h" // silence flymake
#endif
+#include "finalizer.h"
+#include "fprintcolor.h"
+#include "nursery.h"
+#include "gcpage.h"
/* callbacks */
void (*stmcb_destructor)(object_t *);
@@ -11,22 +15,51 @@
static void init_finalizers(struct finalizers_s *f)
{
f->objects_with_finalizers = list_create();
- f->count_non_young = 0;
- f->run_finalizers = NULL;
+ f->probably_young_objects_with_finalizers = list_create();
+ f->run_finalizers = list_create();
f->running_next = NULL;
}
static void setup_finalizer(void)
{
init_finalizers(&g_finalizers);
+
+ for (long j = 1; j < NB_SEGMENTS; j++) {
+ struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
+
+ assert(pseg->finalizers == NULL);
+ struct finalizers_s *f = malloc(sizeof(struct finalizers_s));
+ if (f == NULL)
+ stm_fatalerror("out of memory in create_finalizers"); /* XXX */
+ init_finalizers(f);
+ pseg->finalizers = f;
+ }
}
-static void teardown_finalizer(void)
+void stm_setup_finalizer_queues(int number, stm_finalizer_trigger_fn *triggers)
{
- if (g_finalizers.run_finalizers != NULL)
- list_free(g_finalizers.run_finalizers);
- list_free(g_finalizers.objects_with_finalizers);
+ assert(g_finalizer_triggers.count == 0);
+ assert(g_finalizer_triggers.triggers == NULL);
+
+ g_finalizer_triggers.count = number;
+ g_finalizer_triggers.triggers = (stm_finalizer_trigger_fn *)
+ malloc(number * sizeof(stm_finalizer_trigger_fn));
+
+ for (int qindex = 0; qindex < number; qindex++) {
+ g_finalizer_triggers.triggers[qindex] = triggers[qindex];
+        dprintf(("setup_finalizer_queue(qindex=%d,fun=%p)\n", qindex,
+                 triggers[qindex]));
+ }
+}
+
+static void teardown_finalizer(void) {
+ LIST_FREE(g_finalizers.run_finalizers);
+ LIST_FREE(g_finalizers.objects_with_finalizers);
+ LIST_FREE(g_finalizers.probably_young_objects_with_finalizers);
memset(&g_finalizers, 0, sizeof(g_finalizers));
+
+ if (g_finalizer_triggers.triggers)
+ free(g_finalizer_triggers.triggers);
+ memset(&g_finalizer_triggers, 0, sizeof(g_finalizer_triggers));
}
static void _commit_finalizers(void)
@@ -36,7 +69,7 @@
stm_spin_loop();
}
- if (STM_PSEGMENT->finalizers->run_finalizers != NULL) {
+ if (!list_is_empty(STM_PSEGMENT->finalizers->run_finalizers)) {
/* copy 'STM_PSEGMENT->finalizers->run_finalizers' into
'g_finalizers.run_finalizers', dropping any initial NULLs
(finalizers already called) */
@@ -48,24 +81,24 @@
*STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1;
}
if (frm < list_count(src)) {
- if (g_finalizers.run_finalizers == NULL)
- g_finalizers.run_finalizers = list_create();
g_finalizers.run_finalizers = list_extend(
g_finalizers.run_finalizers,
src, frm);
}
- list_free(src);
}
+ LIST_FREE(STM_PSEGMENT->finalizers->run_finalizers);
/* copy the whole 'STM_PSEGMENT->finalizers->objects_with_finalizers'
into 'g_finalizers.objects_with_finalizers' */
g_finalizers.objects_with_finalizers = list_extend(
g_finalizers.objects_with_finalizers,
STM_PSEGMENT->finalizers->objects_with_finalizers, 0);
- list_free(STM_PSEGMENT->finalizers->objects_with_finalizers);
+
assert(list_is_empty(STM_PSEGMENT->finalizers->probably_young_objects_with_finalizers));
+ LIST_FREE(STM_PSEGMENT->finalizers->objects_with_finalizers);
+
LIST_FREE(STM_PSEGMENT->finalizers->probably_young_objects_with_finalizers);
- free(STM_PSEGMENT->finalizers);
- STM_PSEGMENT->finalizers = NULL;
+ // re-init
+ init_finalizers(STM_PSEGMENT->finalizers);
__sync_lock_release(&g_finalizers.lock);
}
@@ -74,17 +107,14 @@
{
/* like _commit_finalizers(), but forget everything from the
current transaction */
- if (pseg->finalizers != NULL) {
- if (pseg->finalizers->run_finalizers != NULL) {
- if (pseg->finalizers->running_next != NULL) {
- *pseg->finalizers->running_next = (uintptr_t)-1;
- }
- list_free(pseg->finalizers->run_finalizers);
- }
- list_free(pseg->finalizers->objects_with_finalizers);
- free(pseg->finalizers);
- pseg->finalizers = NULL;
+ if (pseg->finalizers->running_next != NULL) {
+ *pseg->finalizers->running_next = (uintptr_t)-1;
}
+ LIST_FREE(pseg->finalizers->run_finalizers);
+ LIST_FREE(pseg->finalizers->objects_with_finalizers);
+ LIST_FREE(pseg->finalizers->probably_young_objects_with_finalizers);
+ // re-init
+ init_finalizers(pseg->finalizers);
/* call the light finalizers for objects that are about to
be forgotten from the current transaction */
@@ -139,31 +169,29 @@
}
}
-object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up)
+
+void stm_enable_finalizer(int queue_index, object_t *obj)
{
- object_t *obj = _stm_allocate_external(size_rounded_up);
-
- if (STM_PSEGMENT->finalizers == NULL) {
- struct finalizers_s *f = malloc(sizeof(struct finalizers_s));
- if (f == NULL)
- stm_fatalerror("out of memory in create_finalizers"); /* XXX */
- init_finalizers(f);
- STM_PSEGMENT->finalizers = f;
+ if (_is_young(obj)) {
+
+        LIST_APPEND(STM_PSEGMENT->finalizers->probably_young_objects_with_finalizers,
+                    obj);
+
+        LIST_APPEND(STM_PSEGMENT->finalizers->probably_young_objects_with_finalizers,
+                    queue_index);
}
- assert(STM_PSEGMENT->finalizers->count_non_young
- <= list_count(STM_PSEGMENT->finalizers->objects_with_finalizers));
- LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj);
- return obj;
+ else {
+ assert(_is_from_same_transaction(obj));
+ LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj);
+        LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers,
+                    queue_index);
+ }
}
+
/************************************************************/
-/* Light finalizers
+/* Destructors
*/
-static void deal_with_young_objects_with_finalizers(void)
+static void deal_with_young_objects_with_destructors(void)
{
- /* for light finalizers: executes finalizers for objs that don't survive
+ /* for destructors: executes destructors for objs that don't survive
this minor gc */
struct list_s *lst = STM_PSEGMENT->young_objects_with_destructors;
long i, count = list_count(lst);
@@ -185,9 +213,9 @@
list_clear(lst);
}
-static void deal_with_old_objects_with_finalizers(void)
+static void deal_with_old_objects_with_destructors(void)
{
- /* for light finalizers */
+ /* for destructors */
int old_gs_register = STM_SEGMENT->segment_num;
int current_gs_register = old_gs_register;
long j;
@@ -230,6 +258,7 @@
}
+
/************************************************************/
/* Algorithm for regular (non-light) finalizers.
Follows closely pypy/doc/discussion/finalizer-order.rst
@@ -331,17 +360,19 @@
struct list_s *lst = f->objects_with_finalizers;
long i, count = list_count(lst);
lst->count = 0;
- f->count_non_young = 0;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count; i += 2) {
object_t *x = (object_t *)list_item(lst, i);
+ uintptr_t qindex = list_item(lst, i + 1);
assert(_finalization_state(x) != 1);
if (_finalization_state(x) >= 2) {
list_set_item(lst, lst->count++, (uintptr_t)x);
+ list_set_item(lst, lst->count++, qindex);
continue;
}
LIST_APPEND(marked, x);
+ LIST_APPEND(marked, qindex);
struct list_s *pending = _finalizer_pending;
LIST_APPEND(pending, x);
@@ -373,27 +404,29 @@
struct list_s *run_finalizers = f->run_finalizers;
long i, count = list_count(marked);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count; i += 2) {
object_t *x = (object_t *)list_item(marked, i);
+ uintptr_t qindex = list_item(marked, i + 1);
int state = _finalization_state(x);
assert(state >= 2);
if (state == 2) {
- if (run_finalizers == NULL)
- run_finalizers = list_create();
LIST_APPEND(run_finalizers, x);
+ LIST_APPEND(run_finalizers, qindex);
_recursively_bump_finalization_state_from_2_to_3(pseg, x);
}
else {
struct list_s *lst = f->objects_with_finalizers;
list_set_item(lst, lst->count++, (uintptr_t)x);
+ list_set_item(lst, lst->count++, qindex);
}
}
- list_free(marked);
+ LIST_FREE(marked);
f->run_finalizers = run_finalizers;
}
+
static void deal_with_objects_with_finalizers(void)
{
/* for non-light finalizers */
@@ -436,11 +469,10 @@
static void mark_visit_from_finalizer1(
struct stm_priv_segment_info_s *pseg, struct finalizers_s *f)
{
- if (f != NULL && f->run_finalizers != NULL) {
- LIST_FOREACH_R(f->run_finalizers, object_t * /*item*/,
- ({
- mark_visit_possibly_overflow_object(item, pseg);
- }));
+ long i, count = list_count(f->run_finalizers);
+ for (i = 0; i < count; i += 2) {
+ object_t *x = (object_t *)list_item(f->run_finalizers, i);
+ mark_visit_possibly_overflow_object(x, pseg);
}
}
@@ -454,40 +486,6 @@
mark_visit_from_finalizer1(get_priv_segment(0), &g_finalizers);
}
-static void _execute_finalizers(struct finalizers_s *f)
-{
- if (f->run_finalizers == NULL)
- return; /* nothing to do */
-
- restart:
- if (f->running_next != NULL)
- return; /* in a nested invocation of execute_finalizers() */
-
- uintptr_t next = 0, total = list_count(f->run_finalizers);
- f->running_next = &next;
-
- while (next < total) {
- object_t *obj = (object_t *)list_item(f->run_finalizers, next);
- list_set_item(f->run_finalizers, next, 0);
- next++;
-
- stmcb_finalizer(obj);
- }
- if (next == (uintptr_t)-1) {
- /* transaction committed: the whole 'f' was freed */
- return;
- }
- f->running_next = NULL;
-
- if (f->run_finalizers->count > total) {
- memmove(f->run_finalizers->items,
- f->run_finalizers->items + total,
- (f->run_finalizers->count - total) * sizeof(uintptr_t));
- goto restart;
- }
-
- LIST_FREE(f->run_finalizers);
-}
/* XXX: according to translator.backendopt.finalizer, getfield_gc
for primitive types is a safe op in light finalizers.
@@ -495,43 +493,107 @@
getfield on *dying obj*).
*/
+
static void _invoke_general_finalizers(stm_thread_local_t *tl)
{
- /* called between transactions */
- rewind_jmp_buf rjbuf;
- stm_rewind_jmp_enterframe(tl, &rjbuf);
- _stm_start_transaction(tl);
- /* XXX: become inevitable, bc. otherwise, we would need to keep
- around the original g_finalizers.run_finalizers to restore it
- in case of an abort. */
- _stm_become_inevitable(MSG_INEV_DONT_SLEEP);
- /* did it work? */
- if (STM_PSEGMENT->transaction_state != TS_INEVITABLE) { /* no */
- /* avoid blocking here, waiting for another INEV transaction.
- If we did that, application code could not proceed (start the
- next transaction) and it will not be obvious from the profile
- why we were WAITing. */
- _stm_commit_transaction();
- stm_rewind_jmp_leaveframe(tl, &rjbuf);
+ /* called between transactions
+ * run old-style finalizers (q_index=-1) and run triggers for all finalizer
+ * queues that are not empty. */
+ dprintf(("invoke_general_finalizers %lu\n",
list_count(g_finalizers.run_finalizers)));
+ if (list_is_empty(g_finalizers.run_finalizers))
return;
- }
+
+ bool *to_trigger = calloc(g_finalizer_triggers.count + 1, sizeof(bool));
while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) {
/* somebody is adding more finalizers (_commit_finalizer()) */
stm_spin_loop();
}
- struct finalizers_s copy = g_finalizers;
- assert(copy.running_next == NULL);
- g_finalizers.run_finalizers = NULL;
- /* others may add to g_finalizers again: */
+
+ int count = list_count(g_finalizers.run_finalizers);
+ for (int i = 0; i < count; i += 2) {
+ int qindex = (int)list_item(g_finalizers.run_finalizers, i + 1);
+ to_trigger[qindex + 1] = true; // also for -1!
+ }
+
__sync_lock_release(&g_finalizers.lock);
- if (copy.run_finalizers != NULL) {
- _execute_finalizers(©);
+ // trigger now:
+ for (int i = 1; i < g_finalizer_triggers.count; i++) {
+ if (to_trigger[i]) {
+ dprintf(("invoke-finalizer-trigger(qindex=%d)\n", i));
+ // XXX: check that triggers *really* cannot touch GC-memory,
+ // otherwise, this needs to run in an (inevitable) transaction
+ g_finalizer_triggers.triggers[i - 1]();
+ }
+ }
+
+ if (!to_trigger[0]) {
+ // 0 is the qindex for old-style finalizers,
+ // so nothing todo here.
+ free(to_trigger);
+ return;
+ }
+ free(to_trigger);
+
+ // run old-style finalizers:
+ dprintf(("invoke-oldstyle-finalizers\n"));
+ rewind_jmp_buf rjbuf;
+ stm_rewind_jmp_enterframe(tl, &rjbuf);
+ _stm_start_transaction(tl);
+
+ object_t *obj;
+ while ((obj = stm_next_to_finalize(-1)) != NULL) {
+ assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE);
+ stmcb_finalizer(obj);
}
_stm_commit_transaction();
stm_rewind_jmp_leaveframe(tl, &rjbuf);
+}
- LIST_FREE(copy.run_finalizers);
+/* Pop and return the next dead object registered on 'queue_index'
+   (-1 for old-style finalizers), or NULL if none.  Turns the current
+   transaction inevitable before removing an entry, because an abort
+   could not restore g_finalizers.run_finalizers. */
+object_t* stm_next_to_finalize(int queue_index) {
+    while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) {
+        /* somebody is adding more finalizers (_commit_finalizer()) */
+        stm_spin_loop();
+    }
+
+    /* run_finalizers holds (object, qindex) pairs */
+    int count = list_count(g_finalizers.run_finalizers);
+    for (int i = 0; i < count; i += 2) {
+        int qindex = (int)list_item(g_finalizers.run_finalizers, i + 1);
+        if (qindex == queue_index) {
+            /* XXX: become inevitable, bc. otherwise, we would need to keep
+               around the original g_finalizers.run_finalizers to restore it
+               in case of an abort. */
+            if (STM_PSEGMENT->transaction_state != TS_INEVITABLE) {
+                _stm_become_inevitable(MSG_INEV_DONT_SLEEP);
+                /* did it work? */
+                if (STM_PSEGMENT->transaction_state != TS_INEVITABLE) { /* no */
+                    /* avoid blocking here, waiting for another INEV
+                       transaction.  If we did that, application code could
+                       not proceed (start the next transaction) and it will
+                       not be obvious from the profile why we were WAITing. */
+                    __sync_lock_release(&g_finalizers.lock);
+                    return NULL;
+                }
+            }
+
+            /* Remove obj from list and return it. */
+            object_t *obj = (object_t*)list_item(g_finalizers.run_finalizers, i);
+            if (i < count - 2) {
+                /* shift the remaining (count - i - 2) items down by one
+                   pair; moving (count - 2) items would read past the end
+                   of the list */
+                memmove(&g_finalizers.run_finalizers->items[i],
+                        &g_finalizers.run_finalizers->items[i + 2],
+                        (count - i - 2) * sizeof(uintptr_t));
+            }
+            g_finalizers.run_finalizers->count -= 2;
+
+            __sync_lock_release(&g_finalizers.lock);
+            return obj;
+        }
+    }
+
+    /* others may add to g_finalizers again: */
+    __sync_lock_release(&g_finalizers.lock);
+
+    return NULL;
+}
diff --git a/c8/stm/finalizer.h b/c8/stm/finalizer.h
--- a/c8/stm/finalizer.h
+++ b/c8/stm/finalizer.h
@@ -1,17 +1,20 @@
+#ifndef _STM_FINALIZER_H_
+#define _STM_FINALIZER_H_
+
#include <stdint.h>
/* see deal_with_objects_with_finalizers() for explanation of these fields */
struct finalizers_s {
long lock;
struct list_s *objects_with_finalizers;
- uintptr_t count_non_young;
+ struct list_s *probably_young_objects_with_finalizers;
struct list_s *run_finalizers;
uintptr_t *running_next;
};
static void mark_visit_from_finalizer_pending(void);
-static void deal_with_young_objects_with_finalizers(void);
-static void deal_with_old_objects_with_finalizers(void);
+static void deal_with_young_objects_with_destructors(void);
+static void deal_with_old_objects_with_destructors(void);
static void deal_with_objects_with_finalizers(void);
static void setup_finalizer(void);
@@ -28,19 +31,16 @@
/* regular finalizers (objs from already-committed transactions) */
static struct finalizers_s g_finalizers;
+static struct {
+ int count;
+ stm_finalizer_trigger_fn *triggers;
+} g_finalizer_triggers;
+
static void _invoke_general_finalizers(stm_thread_local_t *tl);
#define invoke_general_finalizers(tl) do { \
- if (g_finalizers.run_finalizers != NULL) \
- _invoke_general_finalizers(tl); \
+ _invoke_general_finalizers(tl); \
} while (0)
-static void _execute_finalizers(struct finalizers_s *f);
-
-#define any_local_finalizers() (STM_PSEGMENT->finalizers != NULL && \
-                                STM_PSEGMENT->finalizers->run_finalizers != NULL)
-#define exec_local_finalizers() do { \
- if (any_local_finalizers()) \
- _execute_finalizers(STM_PSEGMENT->finalizers); \
-} while (0)
+#endif
diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c
--- a/c8/stm/gcpage.c
+++ b/c8/stm/gcpage.c
@@ -213,7 +213,6 @@
}
s_mutex_unlock();
- exec_local_finalizers();
}
@@ -807,9 +806,9 @@
LIST_FREE(marked_objects_to_trace);
- /* weakrefs and execute old light finalizers */
+ /* weakrefs and execute old destructors */
stm_visit_old_weakrefs();
- deal_with_old_objects_with_finalizers();
+ deal_with_old_objects_with_destructors();
/* cleanup */
clean_up_segment_lists();
diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h
--- a/c8/stm/gcpage.h
+++ b/c8/stm/gcpage.h
@@ -1,3 +1,7 @@
+#ifndef _STM_GCPAGE_H_
+#define _STM_GCPAGE_H_
+
+#include <stdbool.h>
/* Granularity when grabbing more unused pages: take 20 at a time */
#define GCPAGE_NUM_PAGES 20
@@ -22,3 +26,9 @@
static void major_collection_with_mutex(void);
static bool largemalloc_keep_object_at(char *data); /* for largemalloc.c */
static bool smallmalloc_keep_object_at(char *data); /* for smallmalloc.c */
+
+static inline bool mark_visited_test(object_t *obj);
+static bool is_overflow_obj_safe(struct stm_priv_segment_info_s *pseg,
+                                 object_t *obj);
+static void mark_visit_possibly_overflow_object(object_t *obj,
+                                                struct stm_priv_segment_info_s *pseg);
+
+#endif
diff --git a/c8/stm/list.h b/c8/stm/list.h
--- a/c8/stm/list.h
+++ b/c8/stm/list.h
@@ -17,13 +17,13 @@
static struct list_s *list_create(void) __attribute__((unused));
-static inline void list_free(struct list_s *lst)
+static inline void _list_free(struct list_s *lst)
{
free(lst);
}
#define LIST_CREATE(lst) ((lst) = list_create())
-#define LIST_FREE(lst) (list_free(lst), (lst) = NULL)
+#define LIST_FREE(lst) (_list_free(lst), (lst) = NULL)
static struct list_s *_list_grow(struct list_s *, uintptr_t);
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c
--- a/c8/stm/nursery.c
+++ b/c8/stm/nursery.c
@@ -3,6 +3,8 @@
# include "core.h" // silence flymake
#endif
+#include "finalizer.h"
+
/************************************************************/
#define NURSERY_START (FIRST_NURSERY_PAGE * 4096UL)
@@ -56,7 +58,6 @@
/************************************************************/
static object_t *find_existing_shadow(object_t *obj);
-#define GCWORD_MOVED ((object_t *) -1)
#define FLAG_SYNC_LARGE 0x01
@@ -441,23 +442,26 @@
}
}
-static void collect_objs_still_young_but_with_finalizers(void)
+static void collect_young_objects_with_finalizers(void)
{
- struct list_s *lst = STM_PSEGMENT->finalizers->objects_with_finalizers;
- uintptr_t i, total = list_count(lst);
+    /* for real finalizers: in a minor collection, all young objs
+       must survive! */
- for (i = STM_PSEGMENT->finalizers->count_non_young; i < total; i++) {
+    struct list_s *lst =
+        STM_PSEGMENT->finalizers->probably_young_objects_with_finalizers;
+ long i, count = list_count(lst);
+ for (i = 0; i < count; i += 2) {
+ object_t *obj = (object_t *)list_item(lst, i);
+ uintptr_t qindex = list_item(lst, i + 1);
- object_t *o = (object_t *)list_item(lst, i);
- minor_trace_if_young(&o);
+ minor_trace_if_young(&obj);
- /* was not actually movable */
- assert(o == (object_t *)list_item(lst, i));
+ LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj);
+ LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, qindex);
}
- STM_PSEGMENT->finalizers->count_non_young = total;
+ list_clear(lst);
}
+
static void throw_away_nursery(struct stm_priv_segment_info_s *pseg)
{
#pragma push_macro("STM_PSEGMENT")
@@ -555,8 +559,7 @@
collect_roots_in_nursery();
- if (STM_PSEGMENT->finalizers != NULL)
- collect_objs_still_young_but_with_finalizers();
+ collect_young_objects_with_finalizers();
if (STM_PSEGMENT->active_queues != NULL)
collect_active_queues();
@@ -568,7 +571,7 @@
acquire_privatization_lock(STM_SEGMENT->segment_num);
stm_move_young_weakrefs();
release_privatization_lock(STM_SEGMENT->segment_num);
- deal_with_young_objects_with_finalizers();
+ deal_with_young_objects_with_destructors();
assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery));
diff --git a/c8/stm/nursery.h b/c8/stm/nursery.h
--- a/c8/stm/nursery.h
+++ b/c8/stm/nursery.h
@@ -1,3 +1,8 @@
+#ifndef _STM_NURSERY_H_
+#define _STM_NURSERY_H_
+
+#include <stdint.h>
+#include <stdbool.h>
#define NSE_SIGPAUSE _STM_NSE_SIGNAL_MAX
#define NSE_SIGABORT _STM_NSE_SIGNAL_ABORT
@@ -44,3 +49,11 @@
#define must_abort() is_abort(STM_SEGMENT->nursery_end)
static object_t *find_shadow(object_t *obj);
+
+
+#define GCWORD_MOVED ((object_t *) -1)
+static inline bool _is_young(object_t *obj);
+static inline struct object_s *mark_loc(object_t *obj);
+static inline bool _is_from_same_transaction(object_t *obj);
+
+#endif
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -147,19 +147,19 @@
for (i = 0; i < NB_SEGMENTS; i++) {
struct stm_priv_segment_info_s *pr = get_priv_segment(i);
assert(list_is_empty(pr->objects_pointing_to_nursery));
- list_free(pr->objects_pointing_to_nursery);
- list_free(pr->old_objects_with_cards_set);
- list_free(pr->modified_old_objects);
+ LIST_FREE(pr->objects_pointing_to_nursery);
+ LIST_FREE(pr->old_objects_with_cards_set);
+ LIST_FREE(pr->modified_old_objects);
assert(list_is_empty(pr->large_overflow_objects));
- list_free(pr->large_overflow_objects);
- list_free(pr->young_weakrefs);
- list_free(pr->old_weakrefs);
+ LIST_FREE(pr->large_overflow_objects);
+ LIST_FREE(pr->young_weakrefs);
+ LIST_FREE(pr->old_weakrefs);
tree_free(pr->young_outside_nursery);
tree_free(pr->nursery_objects_shadows);
tree_free(pr->callbacks_on_commit_and_abort[0]);
tree_free(pr->callbacks_on_commit_and_abort[1]);
- list_free(pr->young_objects_with_destructors);
- list_free(pr->old_objects_with_destructors);
+ LIST_FREE(pr->young_objects_with_destructors);
+ LIST_FREE(pr->old_objects_with_destructors);
if (pr->active_queues) tree_free(pr->active_queues);
}
diff --git a/c8/stm/sync.c b/c8/stm/sync.c
--- a/c8/stm/sync.c
+++ b/c8/stm/sync.c
@@ -311,7 +311,6 @@
assert(_stm_in_transaction(tl));
ensure_gs_register(tl->last_associated_segment_num);
assert(STM_SEGMENT->running_thread == tl);
- exec_local_finalizers();
}
void _stm_test_switch_segment(int segnum)
diff --git a/c8/stmgc.h b/c8/stmgc.h
--- a/c8/stmgc.h
+++ b/c8/stmgc.h
@@ -716,7 +716,7 @@
extern void (*stmcb_destructor)(object_t *);
void stm_enable_destructor(object_t *);
-/* Support for regular finalizers. Unreachable objects with
+/* XXX: Support for regular finalizers. Unreachable objects with
finalizers are kept alive, as well as everything they point to, and
stmcb_finalizer() is called after the major GC. If there are
several objects with finalizers that reference each other in a
@@ -730,8 +730,14 @@
transaction. For older objects, the finalizer is called from a
random thread between regular transactions, in a new custom
transaction. */
-extern void (*stmcb_finalizer)(object_t *);
-object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up);
+typedef void (*stm_finalizer_trigger_fn)(void);
+/* keep 'extern': stmgc.h is included from several translation units, and
+   dropping it would turn this declaration into a tentative definition,
+   multiply defining the symbol at link time */
+extern void (*stmcb_finalizer)(object_t *);
+void stm_setup_finalizer_queues(int number, stm_finalizer_trigger_fn *triggers);
+void stm_enable_finalizer(int queue_index, object_t *obj);
+
+/* Returns the next object that supposedly died and should have its finalizer
+ called. XXX: This function turns the transaction inevitable. */
+object_t *stm_next_to_finalize(int queue_index);
/* dummies for now: */
diff --git a/c8/test/support.py b/c8/test/support.py
--- a/c8/test/support.py
+++ b/c8/test/support.py
@@ -52,7 +52,6 @@
/*void stm_write(object_t *obj); use _checked_stm_write() instead */
object_t *stm_allocate(ssize_t size_rounded_up);
object_t *stm_allocate_weakref(ssize_t size_rounded_up);
-object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up);
object_t *stm_allocate_noconflict(ssize_t size_rounded_up);
/*void stm_write_card(); use _checked_stm_write_card() instead */
@@ -209,7 +208,11 @@
void (*stmcb_destructor)(object_t *);
void stm_enable_destructor(object_t *);
+typedef void (*stm_finalizer_trigger_fn)(void);
void (*stmcb_finalizer)(object_t *);
+void stm_setup_finalizer_queues(int number, stm_finalizer_trigger_fn *triggers);
+void stm_enable_finalizer(int queue_index, object_t *obj);
+object_t *stm_next_to_finalize(int queue_index);
typedef struct stm_hashtable_s stm_hashtable_t;
typedef ... stm_hashtable_entry_t;
@@ -748,15 +751,19 @@
return o
def stm_allocate_with_finalizer(size):
- o = lib.stm_allocate_with_finalizer(size)
+ # OLD-Style finalizers!
+ o = lib.stm_allocate(size)
tid = 42 + size
lib._set_type_id(o, tid)
+ lib.stm_enable_finalizer(-1, o)
return o
def stm_allocate_with_finalizer_refs(n):
- o = lib.stm_allocate_with_finalizer(HDR + n * WORD)
+ # OLD-Style finalizers!
+ o = lib.stm_allocate(HDR + n * WORD)
tid = 421420 + n
lib._set_type_id(o, tid)
+ lib.stm_enable_finalizer(-1, o)
return o
SIZEOF_HASHTABLE_OBJ = 16 + lib.SIZEOF_MYOBJ
diff --git a/c8/test/test_finalizer.py b/c8/test/test_finalizer.py
--- a/c8/test/test_finalizer.py
+++ b/c8/test/test_finalizer.py
@@ -7,6 +7,14 @@
def setup_method(self, meth):
BaseTest.setup_method(self, meth)
#
+ @ffi.callback("stm_finalizer_trigger_fn")
+ def trigger():
+ # triggers not needed for destructors
+ assert False
+ triggers = ffi.new("stm_finalizer_trigger_fn*", trigger)
+ lib.stm_setup_finalizer_queues(1, triggers)
+ triggers = None
+ #
@ffi.callback("void(object_t *)")
def destructor(obj):
assert stm_get_obj_size(obj) == 48
@@ -140,13 +148,20 @@
-class TestRegularFinalizer(BaseTest):
+class TestOldStyleRegularFinalizer(BaseTest):
expect_content_character = None
run_major_collect_in_finalizer = False
def setup_method(self, meth):
BaseTest.setup_method(self, meth)
#
+ @ffi.callback("stm_finalizer_trigger_fn")
+ def trigger():
+ # triggers not needed for oldstyle-finalizer tests
+ assert False
+ triggers = ffi.new("stm_finalizer_trigger_fn*", trigger)
+ lib.stm_setup_finalizer_queues(1, triggers)
+ #
@ffi.callback("void(object_t *)")
def finalizer(obj):
print "finalizing!", obj
@@ -161,19 +176,24 @@
self._finalizer_keepalive = finalizer
def expect_finalized(self, objs):
- assert self.finalizers_called == objs
+ if isinstance(objs, int):
+ assert len(self.finalizers_called) == objs
+ else:
+ assert self.finalizers_called == objs
self.finalizers_called = []
def test_no_finalizer(self):
self.start_transaction()
lp1 = stm_allocate(48)
stm_major_collect()
+ self.commit_transaction()
self.expect_finalized([])
def test_no_finalizer_in_minor_collection(self):
self.start_transaction()
lp1 = stm_allocate_with_finalizer(48)
stm_minor_collect()
+ self.commit_transaction()
self.expect_finalized([])
def test_finalizer_in_major_collection(self):
@@ -182,10 +202,17 @@
lp1 = stm_allocate_with_finalizer(48)
lp2 = stm_allocate_with_finalizer(48)
lp3 = stm_allocate_with_finalizer(48)
+ self.expect_finalized([])
+ self.push_roots([lp1, lp2, lp3])
+ self.commit_transaction() # move finalizer-objs to global queue
+ self.start_transaction()
+ lp1, lp2, lp3 = self.pop_roots()
print repeat, lp1, lp2, lp3
- self.expect_finalized([])
stm_major_collect()
+ self.commit_transaction() # invoke finalizers
self.expect_finalized([lp1, lp2, lp3])
+ self.start_transaction()
+ self.commit_transaction()
def test_finalizer_from_other_thread(self):
self.start_transaction()
@@ -200,6 +227,7 @@
self.expect_finalized([]) # marked as dead, but wrong thread
#
self.switch(0)
+ py.test.xfail("we don't finalize in the same transaction anymore.")
self.expect_finalized([lp1]) # now it has been finalized
def test_finalizer_ordering(self):
@@ -210,7 +238,14 @@
print lp1, lp2, lp3
stm_set_ref(lp3, 0, lp1)
stm_set_ref(lp1, 0, lp2)
+
+ self.push_roots([lp1, lp2, lp3])
+ self.commit_transaction() # move finalizer-objs to global queue
+ self.start_transaction()
+ lp1, lp2, lp3 = self.pop_roots()
+
stm_major_collect()
+ self.commit_transaction() # invoke finalizers
self.expect_finalized([lp3])
def test_finalizer_extra_transaction(self):
@@ -222,16 +257,16 @@
self.start_transaction()
lp1b = self.pop_root()
- assert lp1b == lp1
+ # assert lp1b == lp1 <- lp1 can be in nursery now
self.expect_finalized([])
- self.commit_transaction()
+ self.commit_transaction() # finalizer-obj moved to global queue
self.expect_finalized([])
self.start_transaction()
stm_major_collect()
self.expect_finalized([])
- self.commit_transaction()
- self.expect_finalized([lp1])
+ self.commit_transaction() # invoke finalizers
+ self.expect_finalized([lp1b])
def test_run_cb_for_all_threads(self):
self.start_transaction()
@@ -245,8 +280,11 @@
self.expect_finalized([])
stm_major_collect()
+ self.commit_transaction()
+ self.expect_finalized(1)
self.switch(0)
- self.expect_finalized([lp2, lp1])
+ self.commit_transaction()
+ self.expect_finalized(1)
def test_run_major_collect_in_finalizer(self):
self.run_major_collect_in_finalizer = True
@@ -256,6 +294,7 @@
lp3 = stm_allocate_with_finalizer(32)
print lp1, lp2, lp3
stm_major_collect()
+ self.commit_transaction()
def test_new_objects_w_finalizers(self):
self.switch(2)
@@ -272,4 +311,5 @@
self.expect_finalized([])
stm_major_collect()
+ self.commit_transaction()
self.expect_finalized([lp1])
diff --git a/c8/test/test_list.py b/c8/test/test_list.py
--- a/c8/test/test_list.py
+++ b/c8/test/test_list.py
@@ -6,7 +6,7 @@
ffi = cffi.FFI()
ffi.cdef("""
struct list_s *list_create(void);
-void list_free(struct list_s *lst);
+void _list_free(struct list_s *lst);
struct list_s *list_append(struct list_s *lst, uintptr_t item);
uintptr_t list_count(struct list_s *lst);
uintptr_t list_item(struct list_s *lst, uintptr_t index);
@@ -144,5 +144,5 @@
assert lib.list_count(a) == 17 + 7
for i, expected in enumerate(range(103, 120) + range(113, 120)):
assert lib.list_item(a, i) == expected
- lib.list_free(b)
- lib.list_free(a)
+ lib._list_free(b)
+ lib._list_free(a)
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit