Author: Remi Meier <[email protected]>
Branch: c8-overflow-objs
Changeset: r1676:20294d4006bc
Date: 2015-02-27 13:51 +0100
http://bitbucket.org/pypy/stmgc/changeset/20294d4006bc/
Log: implement overflow objs
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -582,6 +582,16 @@
assert(!_is_in_nursery(obj));
assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
+ if (obj->stm_flags & GCFLAG_WB_EXECUTED
+ || IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) {
+ /* already executed WB once in this transaction or is
+ overflow object -> do only GC part: */
+ dprintf(("write_slowpath-fast(%p)\n", obj));
+ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER;
+ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
+ return;
+ }
+
int my_segnum = STM_SEGMENT->segment_num;
uintptr_t end_page, first_page = ((uintptr_t)obj) / 4096UL;
char *realobj;
@@ -599,15 +609,6 @@
/* add to read set: */
stm_read(obj);
- if (obj->stm_flags & GCFLAG_WB_EXECUTED) {
- /* already executed WB once in this transaction. do GC
- part again: */
- dprintf(("write_slowpath-fast(%p)\n", obj));
- obj->stm_flags &= ~GCFLAG_WRITE_BARRIER;
- LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
- return;
- }
-
assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED));
dprintf(("write_slowpath(%p): sz=%lu\n", obj, obj_size));
@@ -849,12 +850,14 @@
static void push_large_overflow_objects_to_other_segments(void)
{
+ if (list_is_empty(STM_PSEGMENT->large_overflow_objects))
+ return;
+
/* XXX: also pushes small ones right now */
acquire_privatization_lock(STM_SEGMENT->segment_num);
LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *,
({
- assert(item->stm_flags & GCFLAG_WB_EXECUTED);
- item->stm_flags &= ~GCFLAG_WB_EXECUTED;
+ assert(!(item->stm_flags & GCFLAG_WB_EXECUTED));
synchronize_object_enqueue(item);
}));
synchronize_objects_flush();
@@ -908,6 +911,15 @@
commit_finalizers();
+ /* update 'overflow_number' if needed */
+ if (STM_PSEGMENT->overflow_number_has_been_used) {
+ highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0;
+ assert(highest_overflow_number != /* XXX else, overflow! */
+ (uint32_t)-GCFLAG_OVERFLOW_NUMBER_bit0);
+ STM_PSEGMENT->overflow_number = highest_overflow_number;
+ STM_PSEGMENT->overflow_number_has_been_used = false;
+ }
+
invoke_and_clear_user_callbacks(0); /* for commit */
if (globally_unique_transaction && was_inev) {
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -40,6 +40,15 @@
GCFLAG_WB_EXECUTED = 0x04,
GCFLAG_VISITED = 0x08,
GCFLAG_FINALIZATION_ORDERING = 0x10,
+
+ /* All remaining bits of the 32-bit 'stm_flags' field are taken by
+ the "overflow number". This is a number that identifies the
+ "overflow objects" from the current transaction among all old
+ objects. More precisely, overflow objects are objects from the
+ current transaction that have been flushed out of the nursery,
+ which occurs if the same transaction allocates too many objects.
+ */
+ GCFLAG_OVERFLOW_NUMBER_bit0 = 0x20 /* must be last */
};
@@ -102,6 +111,14 @@
struct tree_s *callbacks_on_commit_and_abort[2];
+ /* This is the number stored in the overflowed objects (a multiple of
+ GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the
+ transaction is done, but only if we actually overflowed any
+ object; otherwise, no object has got this number. */
+ uint32_t overflow_number;
+ bool overflow_number_has_been_used;
+
+
struct stm_commit_log_entry_s *last_commit_log_entry;
struct stm_shadowentry_s *shadowstack_at_start_of_transaction;
@@ -194,6 +211,9 @@
#define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src))
+#define IS_OVERFLOW_OBJ(pseg, obj) (((obj)->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \
+                                    == (pseg)->overflow_number)
+
static inline char *get_segment_base(long segment_num) {
return stm_object_pages + segment_num * (NB_PAGES * 4096UL);
diff --git a/c8/stm/finalizer.c b/c8/stm/finalizer.c
--- a/c8/stm/finalizer.c
+++ b/c8/stm/finalizer.c
@@ -98,14 +98,14 @@
list_clear(lst);
}
- /* also deals with newly created objects: they are at the tail of
+ /* also deals with overflow objects: they are at the tail of
old_objects_with_light_finalizers (this list is kept in order
and we cannot add any already-committed object) */
lst = pseg->old_objects_with_light_finalizers;
count = list_count(lst);
while (count > 0) {
object_t *obj = (object_t *)list_item(lst, --count);
- if (!(obj->stm_flags & GCFLAG_WB_EXECUTED))
+ if (!IS_OVERFLOW_OBJ(pseg, obj))
break;
lst->count = count;
if (must_fix_gs) {
@@ -264,11 +264,14 @@
LIST_APPEND(_finalizer_tmpstack, obj);
}
-static inline struct list_s *finalizer_trace(char *base, object_t *obj,
- struct list_s *lst)
+static inline struct list_s *finalizer_trace(
+ struct stm_priv_segment_info_s *pseg, object_t *obj, struct list_s *lst)
{
- if (!is_new_object(obj))
+ char *base;
+ if (!is_overflow_obj_safe(pseg, obj))
base = stm_object_pages;
+ else
+ base = pseg->pub.segment_base;
struct object_s *realobj = (struct object_s *)REAL_ADDRESS(base, obj);
_finalizer_tmpstack = lst;
@@ -277,7 +280,8 @@
}
-static void _recursively_bump_finalization_state_from_2_to_3(char *base,
-                                                             object_t *obj)
+static void _recursively_bump_finalization_state_from_2_to_3(
+ struct stm_priv_segment_info_s *pseg, object_t *obj)
{
assert(_finalization_state(obj) == 2);
struct list_s *tmpstack = _finalizer_emptystack;
@@ -289,7 +293,7 @@
realobj->stm_flags &= ~GCFLAG_FINALIZATION_ORDERING;
/* trace */
- tmpstack = finalizer_trace(base, obj, tmpstack);
+ tmpstack = finalizer_trace(pseg, obj, tmpstack);
}
if (list_is_empty(tmpstack))
@@ -300,14 +304,16 @@
_finalizer_emptystack = tmpstack;
}
-static void _recursively_bump_finalization_state_from_1_to_2(char *base,
-                                                             object_t *obj)
+static void _recursively_bump_finalization_state_from_1_to_2(
+ struct stm_priv_segment_info_s *pseg, object_t *obj)
{
assert(_finalization_state(obj) == 1);
/* The call will add GCFLAG_VISITED recursively, thus bump state 1->2 */
- mark_visit_possibly_new_object(base, obj);
+ mark_visit_possibly_new_object(obj, pseg);
}
-static struct list_s *mark_finalize_step1(char *base, struct finalizers_s *f)
+static struct list_s *mark_finalize_step1(
+ struct stm_priv_segment_info_s *pseg, struct finalizers_s *f)
{
if (f == NULL)
return NULL;
@@ -336,21 +342,22 @@
int state = _finalization_state(y);
if (state <= 0) {
_bump_finalization_state_from_0_to_1(y);
- pending = finalizer_trace(base, y, pending);
+ pending = finalizer_trace(pseg, y, pending);
}
else if (state == 2) {
- _recursively_bump_finalization_state_from_2_to_3(base, y);
+ _recursively_bump_finalization_state_from_2_to_3(pseg, y);
}
}
_finalizer_pending = pending;
assert(_finalization_state(x) == 1);
- _recursively_bump_finalization_state_from_1_to_2(base, x);
+ _recursively_bump_finalization_state_from_1_to_2(pseg, x);
}
return marked;
}
-static void mark_finalize_step2(char *base, struct finalizers_s *f,
- struct list_s *marked)
+static void mark_finalize_step2(
+ struct stm_priv_segment_info_s *pseg, struct finalizers_s *f,
+ struct list_s *marked)
{
if (f == NULL)
return;
@@ -367,7 +374,7 @@
if (run_finalizers == NULL)
run_finalizers = list_create();
LIST_APPEND(run_finalizers, x);
- _recursively_bump_finalization_state_from_2_to_3(base, x);
+ _recursively_bump_finalization_state_from_2_to_3(pseg, x);
}
else {
struct list_s *lst = f->objects_with_finalizers;
@@ -403,29 +410,28 @@
long j;
for (j = 1; j < NB_SEGMENTS; j++) {
struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
- marked_seg[j] = mark_finalize_step1(pseg->pub.segment_base,
- pseg->finalizers);
+ marked_seg[j] = mark_finalize_step1(pseg, pseg->finalizers);
}
- marked_seg[0] = mark_finalize_step1(stm_object_pages, &g_finalizers);
+ marked_seg[0] = mark_finalize_step1(get_priv_segment(0), &g_finalizers);
LIST_FREE(_finalizer_pending);
for (j = 1; j < NB_SEGMENTS; j++) {
struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
- mark_finalize_step2(pseg->pub.segment_base, pseg->finalizers,
- marked_seg[j]);
+ mark_finalize_step2(pseg, pseg->finalizers, marked_seg[j]);
}
- mark_finalize_step2(stm_object_pages, &g_finalizers, marked_seg[0]);
+ mark_finalize_step2(get_priv_segment(0), &g_finalizers, marked_seg[0]);
LIST_FREE(_finalizer_emptystack);
}
-static void mark_visit_from_finalizer1(char *base, struct finalizers_s *f)
+static void mark_visit_from_finalizer1(
+ struct stm_priv_segment_info_s *pseg, struct finalizers_s *f)
{
if (f != NULL && f->run_finalizers != NULL) {
LIST_FOREACH_R(f->run_finalizers, object_t * /*item*/,
({
- mark_visit_possibly_new_object(base, item);
+ mark_visit_possibly_new_object(item, pseg);
}));
}
}
@@ -435,9 +441,9 @@
long j;
for (j = 1; j < NB_SEGMENTS; j++) {
struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
- mark_visit_from_finalizer1(pseg->pub.segment_base, pseg->finalizers);
+ mark_visit_from_finalizer1(pseg, pseg->finalizers);
}
- mark_visit_from_finalizer1(stm_object_pages, &g_finalizers);
+ mark_visit_from_finalizer1(get_priv_segment(0), &g_finalizers);
}
static void _execute_finalizers(struct finalizers_s *f)
diff --git a/c8/stm/forksupport.c b/c8/stm/forksupport.c
--- a/c8/stm/forksupport.c
+++ b/c8/stm/forksupport.c
@@ -84,7 +84,7 @@
stm_thread_local_t *tl = pr->pub.running_thread;
dprintf(("forksupport_child: abort in seg%ld\n", i));
assert(tl->associated_segment_num == i);
- assert(pr->transaction_state == TS_REGULAR);
+ assert(pr->transaction_state != TS_INEVITABLE);
set_gs_register(get_segment_base(i));
assert(STM_SEGMENT->segment_num == i);
diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c
--- a/c8/stm/gcpage.c
+++ b/c8/stm/gcpage.c
@@ -200,14 +200,18 @@
/************************************************************/
+static bool is_overflow_obj_safe(struct stm_priv_segment_info_s *pseg,
+                                 object_t *obj)
+{
+    /* this function first also checks if the page is accessible in order
+       to not cause segfaults during major gc (it does exactly the same
+       as IS_OVERFLOW_OBJ otherwise) */
+    if (get_page_status_in(pseg->pub.segment_num,
+                           (uintptr_t)obj / 4096UL) == PAGE_NO_ACCESS)
+        return false;
-static bool is_new_object(object_t *obj)
-{
-    struct object_s *realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, obj); /* seg0 */
-    return realobj->stm_flags & GCFLAG_WB_EXECUTED;
+    struct object_s *realobj = (struct object_s*)REAL_ADDRESS(pseg->pub.segment_base, obj);
+    return IS_OVERFLOW_OBJ(pseg, realobj);
}
-
static inline void mark_record_trace(object_t **pobj)
{
/* takes a normal pointer to a thread-local pointer to an object */
@@ -230,7 +234,10 @@
}
-static void mark_and_trace(object_t *obj, char *segment_base)
+static void mark_and_trace(
+ object_t *obj,
+ char *segment_base, /* to trace obj in */
+ struct stm_priv_segment_info_s *pseg) /* to trace children in */
{
/* mark the obj and trace all reachable objs from it */
@@ -243,35 +250,39 @@
/* trace all references found in sharing seg0 (should always be
up-to-date and not cause segfaults, except for overflow objs) */
+ segment_base = pseg->pub.segment_base;
while (!list_is_empty(marked_objects_to_trace)) {
obj = (object_t *)list_pop_item(marked_objects_to_trace);
- char *base = is_new_object(obj) ? segment_base : stm_object_pages;
+        char *base = is_overflow_obj_safe(pseg, obj) ? segment_base
+                                                     : stm_object_pages;
realobj = (struct object_s *)REAL_ADDRESS(base, obj);
stmcb_trace(realobj, &mark_record_trace);
}
}
-static inline void mark_visit_object(object_t *obj, char *segment_base)
+static inline void mark_visit_object(
+    object_t *obj,
+    char *segment_base, /* to trace obj in */
+    struct stm_priv_segment_info_s *pseg) /* to trace children in */
{
/* if already visited, don't trace */
if (obj == NULL || mark_visited_test_and_set(obj))
return;
- mark_and_trace(obj, segment_base);
+ mark_and_trace(obj, segment_base, pseg);
}
-static void mark_visit_possibly_new_object(char *segment_base, object_t *obj)
+static void mark_visit_possibly_new_object(object_t *obj,
+                                           struct stm_priv_segment_info_s *pseg)
{
/* if newly allocated object, we trace in segment_base, otherwise in
the sharing seg0 */
if (obj == NULL)
return;
- if (is_new_object(obj)) {
- mark_visit_object(obj, segment_base);
+ if (is_overflow_obj_safe(pseg, obj)) {
+ mark_visit_object(obj, pseg->pub.segment_base, pseg);
} else {
- mark_visit_object(obj, stm_object_pages);
+ mark_visit_object(obj, stm_object_pages, pseg);
}
}
@@ -282,8 +293,10 @@
end = (const struct stm_shadowentry_s *)(slice + size);
for (; p < end; p++)
if ((((uintptr_t)p->ss) & 3) == 0) {
- assert(!is_new_object(p->ss));
- mark_visit_object(p->ss, stm_object_pages); // seg0
+ mark_visit_object(p->ss, stm_object_pages, // seg0
+ /* there should be no overflow objs not already
+ visited, so any pseg is fine really: */
+ get_priv_segment(STM_SEGMENT->segment_num));
}
return NULL;
}
@@ -350,7 +363,7 @@
and thus make all pages accessible. */
assert_obj_accessible_in(i, item);
- assert(!is_new_object(item)); /* should never be in that list */
+        assert(!is_overflow_obj_safe(get_priv_segment(i), item)); /* should never be in that list */
if (!mark_visited_test_and_set(item)) {
/* trace shared, committed version: only do this if we
didn't
@@ -358,9 +371,9 @@
objs before mark_visit_from_modified_objects AND if we
do mark_and_trace on an obj that is modified in >1
segment,
the tracing always happens in seg0 (see mark_and_trace).
*/
- mark_and_trace(item, stm_object_pages);
+ mark_and_trace(item, stm_object_pages, get_priv_segment(i));
}
- mark_and_trace(item, base); /* private, modified version */
+        mark_and_trace(item, base, get_priv_segment(i)); /* private, modified version */
}));
list_clear(uniques);
@@ -372,7 +385,11 @@
{
if (testing_prebuilt_objs != NULL) {
LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/,
- mark_visit_object(item, stm_object_pages)); // seg0
-                       mark_visit_object(item, stm_object_pages)); // seg0
+                       mark_visit_object(item, stm_object_pages, // seg0
+                           /* any pseg is fine, as we already traced modified
+                              objs and thus covered all overflow objs reachable
+                              from here */
+                           get_priv_segment(STM_SEGMENT->segment_num)));
}
stm_thread_local_t *tl = stm_all_thread_locals;
@@ -392,17 +409,17 @@
If 'tl' is currently running, its 'last_associated_segment_num'
field is the segment number that contains the correct
version of its overflowed objects. */
- char *segment_base = get_segment_base(tl->last_associated_segment_num);
+        struct stm_priv_segment_info_s *pseg =
+            get_priv_segment(tl->last_associated_segment_num);
struct stm_shadowentry_s *current = tl->shadowstack;
struct stm_shadowentry_s *base = tl->shadowstack_base;
while (current-- != base) {
if ((((uintptr_t)current->ss) & 3) == 0) {
- mark_visit_possibly_new_object(segment_base, current->ss);
+ mark_visit_possibly_new_object(current->ss, pseg);
}
}
- mark_visit_possibly_new_object(segment_base, tl->thread_local_obj);
+ mark_visit_possibly_new_object(tl->thread_local_obj, pseg);
tl = tl->next;
} while (tl != stm_all_thread_locals);
@@ -413,8 +430,8 @@
for (i = 1; i < NB_SEGMENTS; i++) {
if (get_priv_segment(i)->transaction_state != TS_NONE) {
mark_visit_possibly_new_object(
- get_segment_base(i),
- get_priv_segment(i)->threadlocal_at_start_of_transaction);
+ get_priv_segment(i)->threadlocal_at_start_of_transaction,
+ get_priv_segment(i));
stm_rewind_jmp_enum_shadowstack(
get_segment(i)->running_thread,
@@ -423,49 +440,6 @@
}
}
-static void ready_large_overflow_objects(void)
-{
-#pragma push_macro("STM_PSEGMENT")
-#pragma push_macro("STM_SEGMENT")
-#undef STM_PSEGMENT
-#undef STM_SEGMENT
- /* objs in large_overflow only have garbage in the sharing seg0,
- since it is used to mark objs as visited, we must make
- sure the flag is cleared at the start of a major collection.
- (XXX: ^^^ may be optional if we have the part below)
-
- Also, we need to be able to recognize these objects in order
- to only trace them in the segment they are valid in. So we
- also make sure to set WB_EXECUTED in the sharing seg0. No
- other objs than large_overflow_objects have WB_EXECUTED in seg0 (since
- there can only be committed versions there).
- */
-
- long i;
- for (i = 1; i < NB_SEGMENTS; i++) {
- struct stm_priv_segment_info_s *pseg = get_priv_segment(i);
- struct list_s *lst = pseg->large_overflow_objects;
-
- LIST_FOREACH_R(lst, object_t* /*item*/,
- ({
- struct object_s *realobj;
- /* WB_EXECUTED always set in this segment */
- assert(realobj = (struct
object_s*)REAL_ADDRESS(pseg->pub.segment_base, item));
- assert(realobj->stm_flags & GCFLAG_WB_EXECUTED);
-
- /* clear VISITED (garbage) and ensure WB_EXECUTED in seg0 */
- mark_visited_test_and_clear(item);
- realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages,
item);
- realobj->stm_flags |= GCFLAG_WB_EXECUTED;
-
- /* make sure this flag is cleared as well */
- realobj->stm_flags &= ~GCFLAG_FINALIZATION_ORDERING;
- }));
- }
-#pragma pop_macro("STM_SEGMENT")
-#pragma pop_macro("STM_PSEGMENT")
-}
-
static void clean_up_segment_lists(void)
{
@@ -494,10 +468,7 @@
({
struct object_s *realobj = (struct object_s *)
REAL_ADDRESS(pseg->pub.segment_base, (uintptr_t)item);
-
- assert(realobj->stm_flags & GCFLAG_WB_EXECUTED);
assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER));
-
realobj->stm_flags |= GCFLAG_WRITE_BARRIER;
}));
list_clear(lst);
@@ -683,8 +654,6 @@
DEBUG_EXPECT_SEGFAULT(false);
- ready_large_overflow_objects();
-
/* marking */
LIST_CREATE(marked_objects_to_trace);
mark_visit_from_modified_objects();
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c
--- a/c8/stm/nursery.c
+++ b/c8/stm/nursery.c
@@ -38,7 +38,7 @@
}
static inline bool _is_from_same_transaction(object_t *obj) {
- return _is_young(obj) || (obj->stm_flags & GCFLAG_WB_EXECUTED);
+ return _is_young(obj) || IS_OVERFLOW_OBJ(STM_PSEGMENT, obj);
}
long stm_can_move(object_t *obj)
@@ -135,8 +135,9 @@
/* if this is not during commit, we make them overflow objects
and push them to other segments on commit. */
assert(!(nobj->stm_flags & GCFLAG_WB_EXECUTED));
+ assert((nobj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == 0);
if (!STM_PSEGMENT->minor_collect_will_commit_now) {
- nobj->stm_flags |= GCFLAG_WB_EXECUTED;
+ nobj->stm_flags |= STM_PSEGMENT->overflow_number;
}
/* Must trace the object later */
@@ -313,6 +314,11 @@
dprintf(("minor_collection commit=%d\n", (int)commit));
STM_PSEGMENT->minor_collect_will_commit_now = commit;
+ if (!commit) {
+ /* 'STM_PSEGMENT->overflow_number' is used now by this collection,
+ in the sense that it's copied to the overflow objects */
+ STM_PSEGMENT->overflow_number_has_been_used = true;
+ }
collect_roots_in_nursery();
diff --git a/c8/stm/nursery.h b/c8/stm/nursery.h
--- a/c8/stm/nursery.h
+++ b/c8/stm/nursery.h
@@ -2,6 +2,9 @@
#define NSE_SIGPAUSE _STM_NSE_SIGNAL_MAX
#define NSE_SIGABORT _STM_NSE_SIGNAL_ABORT
+static uint32_t highest_overflow_number;
+
+
static void minor_collection(bool commit);
static void check_nursery_at_transaction_start(void);
static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg);
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -112,6 +112,8 @@
pr->old_objects_with_light_finalizers = list_create();
pr->last_commit_log_entry = &commit_log_root;
+ pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i;
+ highest_overflow_number = pr->overflow_number;
pr->pub.transaction_read_version = 0xff;
}
diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py
--- a/c8/test/test_gcpage.py
+++ b/c8/test/test_gcpage.py
@@ -473,3 +473,27 @@
stm_major_collect()
assert stm_get_char(s) == '\0'
self.commit_transaction()
+
+
+ def test_overflow_on_ss_in_major_gc(self):
+ self.start_transaction()
+ o = stm_allocate_refs(100)
+ p = stm_allocate(16)
+ stm_set_ref(o, 0, p)
+ self.push_root(o)
+ stm_minor_collect()
+ o = self.pop_root()
+ p = stm_get_ref(o, 0)
+ assert stm_get_char(p) == '\0'
+ self.push_root(o)
+
+ self.switch(1)
+
+ self.start_transaction()
+ stm_major_collect()
+ self.commit_transaction()
+
+ self.switch(0)
+ # p not freed
+ assert stm_get_char(p) == '\0'
+ self.commit_transaction()
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit