Author: Armin Rigo <ar...@tunes.org>
Branch: 
Changeset: r1477:3a8ef5f741ab
Date: 2014-10-17 12:03 +0200
http://bitbucket.org/pypy/stmgc/changeset/3a8ef5f741ab/
Log: hg merge finalizer Finalizers. Yay. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -374,6 +374,7 @@ assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1])); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); + assert(STM_PSEGMENT->finalizers == NULL); #ifndef NDEBUG /* this should not be used when objects_pointing_to_nursery == NULL */ STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; @@ -807,6 +808,9 @@ void stm_commit_transaction(void) { + restart_all: + exec_local_finalizers(); + assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); assert(STM_PSEGMENT->running_pthread == pthread_self()); @@ -824,6 +828,11 @@ Important: we should not call cond_wait() in the meantime. */ synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); + if (any_local_finalizers()) { + s_mutex_unlock(); + goto restart_all; + } + /* detect conflicts */ if (detect_write_read_conflicts()) goto restart; @@ -845,6 +854,8 @@ push_modified_to_other_segments(); _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); + commit_finalizers(); + /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; @@ -865,10 +876,13 @@ } /* done */ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; _finish_transaction(STM_TRANSACTION_COMMIT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); + + invoke_general_finalizers(tl); } void stm_abort_transaction(void) @@ -1046,6 +1060,8 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ + abort_finalizers(); + if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ STM_SEGMENT->nursery_end = pause_signalled ? 
NSE_SIGPAUSE diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -198,6 +198,13 @@ /* marker where this thread became inevitable */ stm_loc_marker_t marker_inev; + + /* light finalizers */ + struct list_s *young_objects_with_light_finalizers; + struct list_s *old_objects_with_light_finalizers; + + /* regular finalizers (objs from the current transaction only) */ + struct finalizers_s *finalizers; }; enum /* safe_point */ { diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c new file mode 100644 --- /dev/null +++ b/c7/stm/finalizer.c @@ -0,0 +1,404 @@ + + +/* callbacks */ +void (*stmcb_light_finalizer)(object_t *); +void (*stmcb_finalizer)(object_t *); + + +static void init_finalizers(struct finalizers_s *f) +{ + f->objects_with_finalizers = list_create(); + f->count_non_young = 0; + f->run_finalizers = NULL; + f->running_next = NULL; +} + +static void setup_finalizer(void) +{ + init_finalizers(&g_finalizers); +} + +static void teardown_finalizer(void) +{ + if (g_finalizers.run_finalizers != NULL) + list_free(g_finalizers.run_finalizers); + list_free(g_finalizers.objects_with_finalizers); + memset(&g_finalizers, 0, sizeof(g_finalizers)); +} + +static void _commit_finalizers(void) +{ + if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { + /* copy 'STM_PSEGMENT->finalizers->run_finalizers' into + 'g_finalizers.run_finalizers', dropping any initial NULLs + (finalizers already called) */ + struct list_s *src = STM_PSEGMENT->finalizers->run_finalizers; + uintptr_t frm = 0; + if (STM_PSEGMENT->finalizers->running_next != NULL) { + frm = *STM_PSEGMENT->finalizers->running_next; + assert(frm <= list_count(src)); + *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1; + } + if (frm < list_count(src)) { + g_finalizers.run_finalizers = list_extend( + g_finalizers.run_finalizers, + src, frm); + } + list_free(src); + } + + /* copy the whole 'STM_PSEGMENT->finalizers->objects_with_finalizers' + into 'g_finalizers.objects_with_finalizers' */ + g_finalizers.objects_with_finalizers = list_extend( + g_finalizers.objects_with_finalizers, + STM_PSEGMENT->finalizers->objects_with_finalizers, 0); + list_free(STM_PSEGMENT->finalizers->objects_with_finalizers); + + free(STM_PSEGMENT->finalizers); + STM_PSEGMENT->finalizers = NULL; +} + +static void _abort_finalizers(void) +{ + /* like _commit_finalizers(), but forget everything from the + current transaction */ + if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { + if (STM_PSEGMENT->finalizers->running_next != NULL) { + *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1; + } + list_free(STM_PSEGMENT->finalizers->run_finalizers); + } + list_free(STM_PSEGMENT->finalizers->objects_with_finalizers); + free(STM_PSEGMENT->finalizers); + STM_PSEGMENT->finalizers = NULL; +} + + +void stm_enable_light_finalizer(object_t *obj) +{ + if (_is_young(obj)) + LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj); + else + LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); +} + +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up) +{ + object_t *obj = _stm_allocate_external(size_rounded_up); + + if (STM_PSEGMENT->finalizers == NULL) { + struct finalizers_s *f = malloc(sizeof(struct finalizers_s)); + if (f == NULL) + stm_fatalerror("out of memory in create_finalizers"); /* XXX */ + init_finalizers(f); + STM_PSEGMENT->finalizers = f; + } + LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj); + return obj; +} + + +/************************************************************/ 
+/* Light finalizers +*/ + +static void deal_with_young_objects_with_finalizers(void) +{ + /* for light finalizers */ + struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers; + long i, count = list_count(lst); + for (i = 0; i < count; i++) { + object_t* obj = (object_t *)list_item(lst, i); + assert(_is_young(obj)); + + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + if (pforwarded_array[0] != GCWORD_MOVED) { + /* not moved: the object dies */ + stmcb_light_finalizer(obj); + } + else { + obj = pforwarded_array[1]; /* moved location */ + assert(!_is_young(obj)); + LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); + } + } + list_clear(lst); +} + +static void deal_with_old_objects_with_finalizers(void) +{ + /* for light finalizers */ + int old_gs_register = STM_SEGMENT->segment_num; + int current_gs_register = old_gs_register; + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + + struct list_s *lst = pseg->old_objects_with_light_finalizers; + long i, count = list_count(lst); + lst->count = 0; + for (i = 0; i < count; i++) { + object_t* obj = (object_t *)list_item(lst, i); + if (!mark_visited_test(obj)) { + /* not marked: object dies */ + /* we're calling the light finalizer in the same + segment as where it was originally registered. For + objects that existed since a long time, it doesn't + change anything: any thread should see the same old + content (because if it wasn't the case, the object + would be in a 'modified_old_objects' list + somewhere, and so it wouldn't be dead). But it's + important if the object was created by the same + transaction: then only that segment sees valid + content. + */ + if (j != current_gs_register) { + set_gs_register(get_segment_base(j)); + current_gs_register = j; + } + stmcb_light_finalizer(obj); + } + else { + /* object survives */ + list_set_item(lst, lst->count++, (uintptr_t)obj); + } + } + } + if (old_gs_register != current_gs_register) + set_gs_register(get_segment_base(old_gs_register)); +} + + +/************************************************************/ +/* Algorithm for regular (non-light) finalizers. + Follows closely pypy/doc/discussion/finalizer-order.rst + as well as rpython/memory/gc/minimark.py. +*/ + +static inline int _finalization_state(object_t *obj) +{ + /* Returns the state, "0", 1, 2 or 3, as per finalizer-order.rst. + One difference is that the official state 0 is returned here + as a number that is <= 0. 
*/ + uintptr_t lock_idx = mark_loc(obj); + return write_locks[lock_idx] - (WL_FINALIZ_ORDER_1 - 1); +} + +static void _bump_finalization_state_from_0_to_1(object_t *obj) +{ + uintptr_t lock_idx = mark_loc(obj); + assert(write_locks[lock_idx] < WL_FINALIZ_ORDER_1); + write_locks[lock_idx] = WL_FINALIZ_ORDER_1; +} + +static struct list_s *_finalizer_tmpstack; +static struct list_s *_finalizer_emptystack; +static struct list_s *_finalizer_pending; + +static inline void _append_to_finalizer_tmpstack(object_t **pobj) +{ + object_t *obj = *pobj; + if (obj != NULL) + LIST_APPEND(_finalizer_tmpstack, obj); +} + +static inline struct list_s *finalizer_trace(char *base, object_t *obj, + struct list_s *lst) +{ + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(base, obj); + _finalizer_tmpstack = lst; + stmcb_trace(realobj, &_append_to_finalizer_tmpstack); + return _finalizer_tmpstack; +} + +static void _recursively_bump_finalization_state(char *base, object_t *obj, + int to_state) +{ + struct list_s *tmpstack = _finalizer_emptystack; + assert(list_is_empty(tmpstack)); + + while (1) { + if (_finalization_state(obj) == to_state - 1) { + /* bump to the next state */ + write_locks[mark_loc(obj)]++; + + /* trace */ + tmpstack = finalizer_trace(base, obj, tmpstack); + } + + if (list_is_empty(tmpstack)) + break; + + obj = (object_t *)list_pop_item(tmpstack); + } + _finalizer_emptystack = tmpstack; +} + +static struct list_s *mark_finalize_step1(char *base, struct finalizers_s *f) +{ + if (f == NULL) + return NULL; + + struct list_s *marked = list_create(); + + struct list_s *lst = f->objects_with_finalizers; + long i, count = list_count(lst); + lst->count = 0; + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(lst, i); + + assert(_finalization_state(x) != 1); + if (_finalization_state(x) >= 2) { + list_set_item(lst, lst->count++, (uintptr_t)x); + continue; + } + LIST_APPEND(marked, x); + + struct list_s *pending = _finalizer_pending; + LIST_APPEND(pending, x); + while (!list_is_empty(pending)) { + object_t *y = (object_t *)list_pop_item(pending); + int state = _finalization_state(y); + if (state <= 0) { + _bump_finalization_state_from_0_to_1(y); + pending = finalizer_trace(base, y, pending); + } + else if (state == 2) { + _recursively_bump_finalization_state(base, y, 3); + } + } + _finalizer_pending = pending; + assert(_finalization_state(x) == 1); + _recursively_bump_finalization_state(base, x, 2); + } + return marked; +} + +static void mark_finalize_step2(char *base, struct finalizers_s *f, + struct list_s *marked) +{ + if (f == NULL) + return; + + struct list_s *run_finalizers = f->run_finalizers; + + long i, count = list_count(marked); + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(marked, i); + + int state = _finalization_state(x); + assert(state >= 2); + if (state == 2) { + if (run_finalizers == NULL) + run_finalizers = list_create(); + LIST_APPEND(run_finalizers, x); + _recursively_bump_finalization_state(base, x, 3); + } + else { + struct list_s *lst = f->objects_with_finalizers; + list_set_item(lst, lst->count++, (uintptr_t)x); + } + } + list_free(marked); + + f->run_finalizers = run_finalizers; +} + +static void deal_with_objects_with_finalizers(void) +{ + /* for non-light finalizers */ + + /* there is one 'objects_with_finalizers' list per segment. + Objects that die at a major collection running in the same + transaction as they were created will be put in the + 'run_finalizers' list of that segment. 
Objects that survive at + least one commit move to the global g_objects_with_finalizers, + and when they die they go to g_run_finalizers. The former kind + of dying object must have its finalizer called in the correct + thread; the latter kind can be called in any thread, through + any segment, because they all should see the same old content + anyway. (If the content was different between segments at this + point, the object would be in a 'modified_old_objects' list + somewhere, and so it wouldn't be dead). + */ + struct list_s *marked_seg[NB_SEGMENTS + 1]; + LIST_CREATE(_finalizer_emptystack); + LIST_CREATE(_finalizer_pending); + + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + marked_seg[j] = mark_finalize_step1(pseg->pub.segment_base, + pseg->finalizers); + } + marked_seg[0] = mark_finalize_step1(stm_object_pages, &g_finalizers); + + LIST_FREE(_finalizer_pending); + + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + mark_finalize_step2(pseg->pub.segment_base, pseg->finalizers, + marked_seg[j]); + } + mark_finalize_step2(stm_object_pages, &g_finalizers, marked_seg[0]); + + LIST_FREE(_finalizer_emptystack); +} + +static void _execute_finalizers(struct finalizers_s *f) +{ + if (f->run_finalizers == NULL) + return; /* nothing to do */ + + restart: + if (f->running_next != NULL) + return; /* in a nested invocation of execute_finalizers() */ + + uintptr_t next = 0, total = list_count(f->run_finalizers); + f->running_next = &next; + + while (next < total) { + object_t *obj = (object_t *)list_item(f->run_finalizers, next); + list_set_item(f->run_finalizers, next, 0); + next++; + + stmcb_finalizer(obj); + } + if (next == (uintptr_t)-1) { + /* transaction committed: the whole 'f' was freed */ + return; + } + f->running_next = NULL; + + if (f->run_finalizers->count > total) { + memmove(f->run_finalizers->items, + f->run_finalizers->items + total, + (f->run_finalizers->count - total) * sizeof(uintptr_t)); + goto restart; + } + + LIST_FREE(f->run_finalizers); +} + +static void _invoke_general_finalizers(stm_thread_local_t *tl) +{ + /* called between transactions */ + static int lock = 0; + + if (__sync_lock_test_and_set(&lock, 1) != 0) { + /* can't acquire the lock: someone else is likely already + running this function, so don't wait. 
*/ + return; + } + + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + stm_start_transaction(tl); + + _execute_finalizers(&g_finalizers); + + stm_commit_transaction(); + stm_rewind_jmp_leaveframe(tl, &rjbuf); + + __sync_lock_release(&lock); +} diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h new file mode 100644 --- /dev/null +++ b/c7/stm/finalizer.h @@ -0,0 +1,47 @@ + +struct finalizers_s { + struct list_s *objects_with_finalizers; + uintptr_t count_non_young; + struct list_s *run_finalizers; + uintptr_t *running_next; +}; + +static void deal_with_young_objects_with_finalizers(void); +static void deal_with_old_objects_with_finalizers(void); +static void deal_with_objects_with_finalizers(void); + +static void setup_finalizer(void); +static void teardown_finalizer(void); + +static void _commit_finalizers(void); +static void _abort_finalizers(void); + +#define commit_finalizers() do { \ + if (STM_PSEGMENT->finalizers != NULL) \ + _commit_finalizers(); \ +} while (0) + +#define abort_finalizers() do { \ + if (STM_PSEGMENT->finalizers != NULL) \ + _abort_finalizers(); \ +} while (0) + + +/* regular finalizers (objs from already-committed transactions) */ +static struct finalizers_s g_finalizers; + +static void _invoke_general_finalizers(stm_thread_local_t *tl); + +#define invoke_general_finalizers(tl) do { \ + if (g_finalizers.run_finalizers != NULL) \ + _invoke_general_finalizers(tl); \ +} while (0) + +static void _execute_finalizers(struct finalizers_s *f); + +#define any_local_finalizers() (STM_PSEGMENT->finalizers != NULL && \ + STM_PSEGMENT->finalizers->run_finalizers != NULL) +#define exec_local_finalizers() do { \ + if (any_local_finalizers()) \ + _execute_finalizers(STM_PSEGMENT->finalizers); \ +} while (0) diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -153,6 +153,7 @@ } s_mutex_unlock(); + exec_local_finalizers(); } @@ -161,7 +162,11 @@ static struct list_s *mark_objects_to_trace; -#define WL_VISITED 255 +#define WL_FINALIZ_ORDER_1 253 +#define WL_FINALIZ_ORDER_2 254 +#define WL_FINALIZ_ORDER_3 WL_VISITED + +#define WL_VISITED 255 static inline uintptr_t mark_loc(object_t *obj) @@ -626,8 +631,14 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); - /* weakrefs: */ + /* finalizer support: will mark as WL_VISITED all objects with a + finalizer and all objects reachable from there, and also moves + some objects from 'objects_with_finalizers' to 'run_finalizers'. 
*/ + deal_with_objects_with_finalizers(); + + /* weakrefs and old light finalizers */ stm_visit_old_weakrefs(); + deal_with_old_objects_with_finalizers(); /* cleanup */ clean_up_segment_lists(); diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -30,6 +30,21 @@ return lst; } +static struct list_s *list_extend(struct list_s *lst, struct list_s *lst2, + uintptr_t slicestart) +{ + if (lst2->count <= slicestart) + return lst; + uintptr_t baseindex = lst->count; + lst->count = baseindex + lst2->count - slicestart; + uintptr_t lastindex = lst->count - 1; + if (lastindex > lst->last_allocated) + lst = _list_grow(lst, lastindex); + memcpy(lst->items + baseindex, lst2->items + slicestart, + (lst2->count - slicestart) * sizeof(uintptr_t)); + return lst; +} + /************************************************************/ diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -83,6 +83,9 @@ return &lst->items[index]; } +static struct list_s *list_extend(struct list_s *lst, struct list_s *lst2, + uintptr_t slicestart); + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -435,6 +435,22 @@ } } +static void collect_objs_still_young_but_with_finalizers(void) +{ + struct list_s *lst = STM_PSEGMENT->finalizers->objects_with_finalizers; + uintptr_t i, total = list_count(lst); + + for (i = STM_PSEGMENT->finalizers->count_non_young; i < total; i++) { + + object_t *o = (object_t *)list_item(lst, i); + minor_trace_if_young(&o); + + /* was not actually movable */ + assert(o == (object_t *)list_item(lst, i)); + } + STM_PSEGMENT->finalizers->count_non_young = total; +} + static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { #pragma push_macro("STM_PSEGMENT") @@ -554,11 +570,15 @@ collect_roots_in_nursery(); + if (STM_PSEGMENT->finalizers != NULL) + collect_objs_still_young_but_with_finalizers(); + collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); /* now all surviving nursery objects have been moved out */ stm_move_young_weakrefs(); + deal_with_young_objects_with_finalizers(); throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -113,7 +113,7 @@ /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ + assert(1 <= i && i < 253); /* 253 is WL_FINALIZ_ORDER_1 in gcpage.c */ pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; @@ -128,6 +128,8 @@ pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_commit_and_abort[0] = tree_create(); pr->callbacks_on_commit_and_abort[1] = tree_create(); + pr->young_objects_with_light_finalizers = list_create(); + pr->old_objects_with_light_finalizers = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -147,6 +149,7 @@ setup_gcpage(); setup_pages(); setup_forksupport(); + setup_finalizer(); } void stm_teardown(void) @@ -169,12 +172,15 @@ tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_commit_and_abort[0]); tree_free(pr->callbacks_on_commit_and_abort[1]); + list_free(pr->young_objects_with_light_finalizers); + list_free(pr->old_objects_with_light_finalizers); } 
munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; close_fd_mmap(stm_object_pages_fd); + teardown_finalizer(); teardown_core(); teardown_sync(); teardown_gcpage(); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -228,6 +228,7 @@ assert(_stm_in_transaction(tl)); set_gs_register(get_segment_base(tl->associated_segment_num)); assert(STM_SEGMENT->running_thread == tl); + exec_local_finalizers(); } #if STM_TESTS diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -16,6 +16,7 @@ #include "stm/weakref.h" #include "stm/marker.h" #include "stm/prof.h" +#include "stm/finalizer.h" #include "stm/misc.c" #include "stm/list.c" @@ -37,3 +38,4 @@ #include "stm/marker.c" #include "stm/prof.c" #include "stm/rewind_setjmp.c" +#include "stm/finalizer.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -505,6 +505,29 @@ } while (0) +/* Support for light finalizers. This is a simple version of + finalizers that guarantees not to do anything fancy, like not + resurrecting objects. */ +void (*stmcb_light_finalizer)(object_t *); +void stm_enable_light_finalizer(object_t *); + +/* Support for regular finalizers. Unreachable objects with + finalizers are kept alive, as well as everything they point to, and + stmcb_finalizer() is called after the major GC. If there are + several objects with finalizers that reference each other in a + well-defined order (i.e. there are no cycles), then they are + finalized in order from outermost to innermost (i.e. starting with + the ones that are unreachable even from others). + + For objects that have been created by the current transaction, if a + major GC runs while that transaction is alive and finds the object + unreachable, the finalizer is called immediately in the same + transaction. For older objects, the finalizer is called from a + random thread between regular transactions, in a new custom + transaction. 
*/ +void (*stmcb_finalizer)(object_t *); +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up); + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -31,6 +31,7 @@ /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); object_t *stm_allocate_weakref(ssize_t size_rounded_up); +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); /*void stm_write_card(); use _checked_stm_write_card() instead */ @@ -62,6 +63,7 @@ bool _check_become_inevitable(stm_thread_local_t *tl); bool _check_become_globally_unique_transaction(stm_thread_local_t *tl); int stm_is_inevitable(void); +long current_segment_num(void); void _set_type_id(object_t *obj, uint32_t h); uint32_t _get_type_id(object_t *obj); @@ -158,6 +160,11 @@ void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); void stm_pop_marker(stm_thread_local_t *); + +void (*stmcb_light_finalizer)(object_t *); +void stm_enable_light_finalizer(object_t *); + +void (*stmcb_finalizer)(object_t *); """) @@ -370,6 +377,11 @@ void stmcb_commit_soon() { } + +long current_segment_num(void) +{ + return STM_SEGMENT->segment_num; +} ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_NO_AUTOMATIC_SETJMP', '1'), @@ -449,6 +461,18 @@ stm_read(obj) return lib._get_ptr(obj, idx) +def stm_allocate_with_finalizer(size): + o = lib.stm_allocate_with_finalizer(size) + tid = 42 + size + lib._set_type_id(o, tid) + return o + +def stm_allocate_with_finalizer_refs(n): + o = lib.stm_allocate_with_finalizer(HDR + n * WORD) + tid = 421420 + n + lib._set_type_id(o, tid) + return o + def stm_set_char(obj, c, offset=HDR, use_cards=False): assert HDR <= offset < stm_get_obj_size(obj) if use_cards: diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py new file mode 100644 --- /dev/null +++ b/c7/test/test_finalizer.py @@ -0,0 +1,184 @@ +from support import * +import py + + +class TestLightFinalizer(BaseTest): + + def setup_method(self, meth): + BaseTest.setup_method(self, meth) + # + @ffi.callback("void(object_t *)") + def light_finalizer(obj): + segnum = lib.current_segment_num() + tlnum = '?' 
+ for n, tl in enumerate(self.tls): + if tl.associated_segment_num == segnum: + tlnum = n + break + self.light_finalizers_called.append((obj, tlnum)) + self.light_finalizers_called = [] + lib.stmcb_light_finalizer = light_finalizer + self._light_finalizer_keepalive = light_finalizer + + def expect_finalized(self, objs, from_tlnum=None): + assert [obj for (obj, tlnum) in self.light_finalizers_called] == objs + if from_tlnum is not None: + for obj, tlnum in self.light_finalizers_called: + assert tlnum == from_tlnum + self.light_finalizers_called = [] + + def test_no_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + self.commit_transaction() + self.expect_finalized([]) + + def test_young_light_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.expect_finalized([]) + self.commit_transaction() + self.expect_finalized([lp1], from_tlnum=0) + + def test_young_light_finalizer_survives(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) # stays alive + self.commit_transaction() + self.expect_finalized([]) + + def test_old_light_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + lib.stm_enable_light_finalizer(lp1) + self.commit_transaction() + self.expect_finalized([]) + + def test_old_light_finalizer_2(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + self.expect_finalized([]) + stm_major_collect() + self.expect_finalized([lp1]) + self.commit_transaction() + + def test_old_light_finalizer_survives(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + self.push_root(lp1) + stm_major_collect() + self.commit_transaction() + self.expect_finalized([]) + + def test_old_light_finalizer_segment(self): + self.start_transaction() + # + self.switch(1) + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # + self.switch(0) + self.expect_finalized([]) + stm_major_collect() + self.expect_finalized([lp1], from_tlnum=1) + + +class TestRegularFinalizer(BaseTest): + + def setup_method(self, meth): + BaseTest.setup_method(self, meth) + # + @ffi.callback("void(object_t *)") + def finalizer(obj): + self.finalizers_called.append(obj) + self.finalizers_called = [] + lib.stmcb_finalizer = finalizer + self._finalizer_keepalive = finalizer + + def expect_finalized(self, objs): + assert self.finalizers_called == objs + self.finalizers_called = [] + + def test_no_finalizer(self): + self.start_transaction() + lp1 = stm_allocate(48) + stm_major_collect() + self.expect_finalized([]) + + def test_no_finalizer_in_minor_collection(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer(48) + stm_minor_collect() + self.expect_finalized([]) + + def test_finalizer_in_major_collection(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer(48) + lp2 = stm_allocate_with_finalizer(48) + lp3 = stm_allocate_with_finalizer(48) + print lp1, lp2, lp3 + stm_major_collect() + self.expect_finalized([lp1, lp2, lp3]) + + def test_finalizer_ordering(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer_refs(1) + lp2 = 
stm_allocate_with_finalizer_refs(1) + lp3 = stm_allocate_with_finalizer_refs(1) + print lp1, lp2, lp3 + stm_set_ref(lp3, 0, lp1) + stm_set_ref(lp1, 0, lp2) + stm_major_collect() + self.expect_finalized([lp3]) + + def test_finalizer_extra_transation(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer(32) + print lp1 + self.push_root(lp1) + self.commit_transaction() + + self.start_transaction() + lp1b = self.pop_root() + assert lp1b == lp1 + self.expect_finalized([]) + self.commit_transaction() + self.expect_finalized([]) + + self.start_transaction() + stm_major_collect() + self.expect_finalized([]) + self.commit_transaction() + self.expect_finalized([lp1]) + + def test_run_cb_for_all_threads(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer(48) + print lp1 + # + self.switch(1) + self.start_transaction() + lp2 = stm_allocate_with_finalizer(56) + print lp2 + + self.expect_finalized([]) + stm_major_collect() + self.switch(0) + self.expect_finalized([lp2, lp1]) diff --git a/c7/test/test_list.py b/c7/test/test_list.py --- a/c7/test/test_list.py +++ b/c7/test/test_list.py @@ -6,6 +6,12 @@ ffi = cffi.FFI() ffi.cdef(""" struct list_s *list_create(void); +void list_free(struct list_s *lst); +struct list_s *list_append(struct list_s *lst, uintptr_t item); +uintptr_t list_count(struct list_s *lst); +uintptr_t list_item(struct list_s *lst, uintptr_t index); +struct list_s *list_extend(struct list_s *lst, struct list_s *lst2, + uintptr_t slicestart); struct tree_s *tree_create(void); void tree_free(struct tree_s *tree); @@ -127,3 +133,16 @@ def test_hash_permutation(): hashes = [((n ^ (n << 4)) & 0xFF0) for n in range(256)] assert set(hashes) == set(range(0, 4096, 16)) + +def test_list_extend(): + a = lib.list_create() + b = lib.list_create() + for i in range(100, 120): + b = lib.list_append(b, i) + a = lib.list_extend(a, b, 3) + a = lib.list_extend(a, b, 13) + assert lib.list_count(a) == 17 + 7 + for i, expected in enumerate(range(103, 120) + range(113, 120)): + assert lib.list_item(a, i) == expected + lib.list_free(b) + lib.list_free(a) _______________________________________________ pypy-commit mailing list pypy-commit@python.org https://mail.python.org/mailman/listinfo/pypy-commit
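
The stmgc.h hunk in the diff above is the public face of this changeset: a stmcb_light_finalizer callback plus stm_enable_light_finalizer(), and a stmcb_finalizer callback plus stm_allocate_with_finalizer(). The following is a minimal client-side sketch of how those entry points fit together; it is not part of the changeset, and the usual stmgc setup (stm_setup(), stm_register_thread_local(), the stmcb_size_rounded_up/stmcb_trace callbacks and object-header initialization) is assumed to exist elsewhere and is elided.

/* Sketch only: wiring up the new finalizer entry points from client code.
   The rest of the stmgc setup is assumed to be done elsewhere. */
#include <stdio.h>
#include "stmgc.h"

static void my_light_finalizer(object_t *obj)
{
    /* called when 'obj' dies; must not resurrect it or do anything fancy */
    (void)obj;
    printf("light finalizer ran\n");
}

static void my_finalizer(object_t *obj)
{
    /* called after a major GC finds 'obj' unreachable, possibly from
       another thread in a new custom transaction */
    (void)obj;
    printf("regular finalizer ran\n");
}

void example_transaction(stm_thread_local_t *tl)
{
    stmcb_light_finalizer = my_light_finalizer;
    stmcb_finalizer = my_finalizer;

    rewind_jmp_buf rjbuf;
    stm_rewind_jmp_enterframe(tl, &rjbuf);
    stm_start_transaction(tl);

    object_t *a = stm_allocate(48);                  /* ordinary object... */
    stm_enable_light_finalizer(a);                   /* ...given a light finalizer */

    object_t *b = stm_allocate_with_finalizer(48);   /* regular finalizer */
    (void)a; (void)b;    /* application-specific object initialization elided */

    stm_commit_transaction();
    stm_rewind_jmp_leaveframe(tl, &rjbuf);
}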
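deal_with_objects_with_finalizers() in the diff implements the ordering rules of pypy/doc/discussion/finalizer-order.rst, with the states 0..3 encoded in write_locks (WL_FINALIZ_ORDER_1..3). The toy program below only illustrates that state machine on the lp3 -> lp1 -> lp2 chain from test_finalizer_ordering: plain structs stand in for stmgc objects, each object has a single outgoing reference, and GC reachability from roots is ignored (all three objects are assumed dead). It is not the stmgc code.

/* Toy model of mark_finalize_step1/step2: states 0..3 as in
   finalizer-order.rst, on a chain lp3 -> lp1 -> lp2 of dead objects
   that all carry a finalizer. */
#include <stdio.h>
#include <stddef.h>

typedef struct obj_s { const char *name; struct obj_s *ref; int state; } obj_t;

static void bump_recursively(obj_t *o, int to_state)
{
    /* simplified _recursively_bump_finalization_state(): follow the single
       outgoing reference instead of using a work stack */
    while (o != NULL && o->state == to_state - 1) {
        o->state++;
        o = o->ref;
    }
}

int main(void)
{
    obj_t lp2 = { "lp2", NULL, 0 };
    obj_t lp1 = { "lp1", &lp2, 0 };
    obj_t lp3 = { "lp3", &lp1, 0 };
    obj_t *with_finalizers[] = { &lp1, &lp2, &lp3 };   /* registration order */
    obj_t *marked[3]; int n_marked = 0;

    /* step 1: walk objects_with_finalizers, assigning states */
    for (int i = 0; i < 3; i++) {
        obj_t *x = with_finalizers[i];
        if (x->state >= 2)
            continue;               /* already reachable from another finalizer */
        marked[n_marked++] = x;
        /* trace from x: anything at state 0 goes to 1; anything already at
           state 2 is re-bumped, together with its subgraph, to 3 */
        for (obj_t *y = x; y != NULL; y = y->ref) {
            if (y->state <= 0)        y->state = 1;
            else if (y->state == 2) { bump_recursively(y, 3); break; }
            else                      break;
        }
        bump_recursively(x, 2);     /* x itself: 1 -> 2, and its subgraph */
    }

    /* step 2: state 2 means "run the finalizer now"; state 3 means "still
       reachable from an object whose finalizer will run, so wait" */
    for (int i = 0; i < n_marked; i++) {
        obj_t *x = marked[i];
        if (x->state == 2) {
            printf("run finalizer of %s\n", x->name);
            bump_recursively(x, 3);
        }
        else {
            printf("keep %s for a later collection\n", x->name);
        }
    }
    return 0;   /* prints: keep lp1 ..., run finalizer of lp3 (lp2 never marked),
                   matching expect_finalized([lp3]) in the test */
}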
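_invoke_general_finalizers() in the diff deliberately uses __sync_lock_test_and_set() as a try-lock: a thread that cannot take the lock returns immediately instead of waiting, because another thread is already draining g_finalizers in its own transaction. A small self-contained illustration of that pattern (plain C with pthreads, independent of stmgc):

/* Toy try-lock: the loser of the race skips the work instead of blocking. */
#include <pthread.h>
#include <stdio.h>

static int lock = 0;
static int runs = 0;

static void maybe_run_finalizers(long tid)
{
    if (__sync_lock_test_and_set(&lock, 1) != 0) {
        printf("thread %ld: someone else is already running them, skip\n", tid);
        return;
    }
    runs++;                       /* stands in for _execute_finalizers() */
    printf("thread %ld: running the global finalizers\n", tid);
    __sync_lock_release(&lock);
}

static void *worker(void *arg)
{
    maybe_run_finalizers((long)arg);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, worker, (void *)1L);
    pthread_create(&t2, NULL, worker, (void *)2L);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("finalizer passes run: %d (1 or 2, depending on timing)\n", runs);
    return 0;
}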