Author: Armin Rigo <ar...@tunes.org>
Branch: stmgc-c8
Changeset: r78297:eeea8ac4da6b
Date: 2015-06-24 16:35 +0200
http://bitbucket.org/pypy/pypy/changeset/eeea8ac4da6b/
Log:	import stmgc/c8ccc22dbf16

diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-9ffba4fe03df
+c8ccc22dbf16
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -511,10 +511,12 @@
            try to detach an inevitable transaction regularly */
         detached = fetch_detached_transaction();
         if (detached == 0) {
+            EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE);
             if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001))
                 goto wait_some_more;
         }
     }
+    EMIT_WAIT_DONE();
     s_mutex_unlock();
 
     if (detached != 0)
@@ -1130,6 +1132,7 @@
 static void _do_start_transaction(stm_thread_local_t *tl)
 {
     assert(!_stm_in_transaction(tl));
+    tl->wait_event_emitted = 0;
 
     acquire_thread_segment(tl);
     /* GS invalid before this point! */
@@ -1318,6 +1321,7 @@
     }
     assert(STM_SEGMENT->running_thread->self_or_0_if_atomic ==
            (intptr_t)(STM_SEGMENT->running_thread));
+    assert(STM_SEGMENT->running_thread->wait_event_emitted == 0);
     dprintf(("> stm_commit_transaction(external=%d)\n", (int)external));
 
     minor_collection(/*commit=*/ true, external);
@@ -1582,9 +1586,8 @@
 
         if (any_soon_finished_or_inevitable_thread_segment() &&
             num_waits <= NB_SEGMENTS) {
-#if STM_TESTS
-            timing_become_inevitable();   /* for tests: another transaction */
-            stm_abort_transaction();      /* is already inevitable, abort */
+#if STM_TESTS                             /* for tests: another transaction */
+            stm_abort_transaction();      /* is already inevitable, abort */
 #endif
 
             bool timed_out = false;
@@ -1594,6 +1597,7 @@
                    !safe_point_requested()) {
 
                 /* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */
+                EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE);
                 if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ,
                                        0.000054321))
                     timed_out = true;
@@ -1607,14 +1611,17 @@
                    not too common.  We don't want two threads
                    constantly detaching each other. */
                 intptr_t detached = fetch_detached_transaction();
-                if (detached != 0)
+                if (detached != 0) {
+                    EMIT_WAIT_DONE();
                     commit_fetched_detached_transaction(detached);
+                }
             }
             else {
                 num_waits++;
             }
             goto retry_from_start;
         }
+        EMIT_WAIT_DONE();
         if (!_validate_and_turn_inevitable())
             goto retry_from_start;
     }
diff --git a/rpython/translator/stm/src_stm/stm/marker.h b/rpython/translator/stm/src_stm/stm/marker.h
--- a/rpython/translator/stm/src_stm/stm/marker.h
+++ b/rpython/translator/stm/src_stm/stm/marker.h
@@ -15,3 +15,28 @@
 #define timing_become_inevitable()                                     \
     (timing_enabled() ?                                                \
          _timing_become_inevitable() : (void)0)
+
+
+static inline void emit_wait(stm_thread_local_t *tl, enum stm_event_e event)
+{
+    if (!timing_enabled())
+        return;
+    if (tl->wait_event_emitted != 0) {
+        if (tl->wait_event_emitted == event)
+            return;
+        stmcb_timing_event(tl, STM_WAIT_DONE, NULL);
+    }
+    tl->wait_event_emitted = event;
+    stmcb_timing_event(tl, event, NULL);
+}
+
+static inline void emit_wait_done(stm_thread_local_t *tl)
+{
+    if (tl->wait_event_emitted != 0) {
+        tl->wait_event_emitted = 0;
+        stmcb_timing_event(tl, STM_WAIT_DONE, NULL);
+    }
+}
+
+#define EMIT_WAIT(event)      emit_wait(STM_SEGMENT->running_thread, event)
+#define EMIT_WAIT_DONE()      emit_wait_done(STM_SEGMENT->running_thread)
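
The emit_wait()/emit_wait_done() helpers added to marker.h deduplicate wait
events per thread: re-emitting the same wait reason produces nothing, and
switching to a different reason first closes the open wait with STM_WAIT_DONE.
The following is only a rough standalone model of that pairing logic; the
types and the printing callback are stand-ins (and the timing_enabled() guard
is omitted), not the real stmgc headers:

/* Rough standalone model of the emit_wait()/emit_wait_done() pairing;
   fake_thread_local_t and record_event() are invented for the example.
   A wait_event_emitted value of 0 means "no wait event currently open". */
#include <stdio.h>

enum stm_event_e {                 /* 0 is reserved for "no wait open" */
    STM_WAIT_FREE_SEGMENT = 1,
    STM_WAIT_SYNCING,
    STM_WAIT_SYNC_PAUSE,
    STM_WAIT_OTHER_INEVITABLE,
    STM_WAIT_DONE,
};

static const char *const event_names[] = {
    "(none)", "STM_WAIT_FREE_SEGMENT", "STM_WAIT_SYNCING",
    "STM_WAIT_SYNC_PAUSE", "STM_WAIT_OTHER_INEVITABLE", "STM_WAIT_DONE",
};

typedef struct { int wait_event_emitted; } fake_thread_local_t;

static void record_event(fake_thread_local_t *tl, enum stm_event_e event)
{
    (void)tl;                      /* stands in for stmcb_timing_event() */
    printf("event: %s\n", event_names[event]);
}

static void emit_wait(fake_thread_local_t *tl, enum stm_event_e event)
{
    if (tl->wait_event_emitted != 0) {
        if (tl->wait_event_emitted == event)
            return;                          /* same reason: emit nothing */
        record_event(tl, STM_WAIT_DONE);     /* close the previous wait */
    }
    tl->wait_event_emitted = event;
    record_event(tl, event);
}

static void emit_wait_done(fake_thread_local_t *tl)
{
    if (tl->wait_event_emitted != 0) {
        tl->wait_event_emitted = 0;
        record_event(tl, STM_WAIT_DONE);
    }
}

int main(void)
{
    fake_thread_local_t tl = { 0 };
    emit_wait(&tl, STM_WAIT_OTHER_INEVITABLE);   /* emitted */
    emit_wait(&tl, STM_WAIT_OTHER_INEVITABLE);   /* deduplicated, no output */
    emit_wait(&tl, STM_WAIT_SYNC_PAUSE);         /* WAIT_DONE, then new wait */
    emit_wait_done(&tl);                         /* closes the open wait */
    emit_wait_done(&tl);                         /* nothing open, no output */
    return 0;
}

This is why the retry loops in core.c can call EMIT_WAIT() on every iteration
without flooding the profile log, and why _do_start_transaction() can simply
reset wait_event_emitted to 0.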
diff --git a/rpython/translator/stm/src_stm/stm/prof.c b/rpython/translator/stm/src_stm/stm/prof.c
--- a/rpython/translator/stm/src_stm/stm/prof.c
+++ b/rpython/translator/stm/src_stm/stm/prof.c
@@ -2,7 +2,7 @@
 #include <stdio.h>
 #include <time.h>
 
-static FILE *profiling_file;
+static FILE *volatile profiling_file;
 static char *profiling_basefn = NULL;
 static stm_expand_marker_fn profiling_expand_marker;
 
@@ -26,9 +26,6 @@
     struct buf_s buf;
     struct timespec t;
 
-    clock_gettime(CLOCK_MONOTONIC, &t);
-    buf.tv_sec = t.tv_sec;
-    buf.tv_nsec = t.tv_nsec;
     buf.thread_num = tl->thread_local_counter;
     buf.event = event;
     buf.marker_length = 0;
@@ -39,10 +36,29 @@
                                              buf.extra, MARKER_LEN_MAX);
     }
 
-    if (fwrite(&buf, offsetof(struct buf_s, extra) + buf.marker_length,
-               1, profiling_file) != 1) {
+    size_t result, outsize = offsetof(struct buf_s, extra) + buf.marker_length;
+    FILE *f = profiling_file;
+    if (f == NULL)
+        return;
+    flockfile(f);
+
+    /* We expect the following CLOCK_MONOTONIC to be really monotonic:
+       it should guarantee that the file will be perfectly ordered by time.
+       That's why we do it inside flockfile()/funlockfile(). */
+    clock_gettime(CLOCK_MONOTONIC, &t);
+    buf.tv_sec = t.tv_sec;
+    buf.tv_nsec = t.tv_nsec;
+
+    result = fwrite_unlocked(&buf, outsize, 1, f);
+    funlockfile(f);
+
+    if (result != 1) {
         fprintf(stderr, "stmgc: profiling log file closed unexpectedly: %m\n");
-        close_timing_log();
+
+        /* xxx the FILE leaks here, but it is better than random crashes if
+           we try to close it while other threads are still writing to it
+        */
+        profiling_file = NULL;
     }
 }
 
@@ -54,11 +70,12 @@
 
 static bool open_timing_log(const char *filename)
 {
-    profiling_file = fopen(filename, "w");
-    if (profiling_file == NULL)
+    FILE *f = fopen(filename, "w");
+    profiling_file = f;
+    if (f == NULL)
         return false;
 
-    fwrite("STMGC-C8-PROF01\n", 16, 1, profiling_file);
+    fwrite("STMGC-C8-PROF01\n", 16, 1, f);
     stmcb_timing_event = _stm_profiling_event;
     return true;
 }
@@ -66,9 +83,11 @@
 static bool close_timing_log(void)
 {
     if (stmcb_timing_event == &_stm_profiling_event) {
+        FILE *f = profiling_file;
         stmcb_timing_event = NULL;
-        fclose(profiling_file);
         profiling_file = NULL;
+        if (f != NULL)
+            fclose(f);
         return true;
     }
     return false;
@@ -76,8 +95,9 @@
 
 static void prof_forksupport_prepare(void)
 {
-    if (profiling_file != NULL)
-        fflush(profiling_file);
+    FILE *f = profiling_file;
+    if (f != NULL)
+        fflush(f);
 }
 
 static void prof_forksupport_child(void)
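
In prof.c, profiling_file becomes a volatile pointer that each caller reads
once into a local, and both the CLOCK_MONOTONIC timestamp and the write now
happen inside flockfile()/funlockfile(), so concurrent threads cannot
interleave records and the log stays ordered by time; on a write error the
FILE is abandoned (deliberately leaked) instead of being fclose()d under
other writers.  A minimal standalone sketch of that pattern follows; the
record layout, file name and thread setup are invented for the example, not
the real PYPYSTM format:

/* Sketch of the flockfile()-based logging pattern: timestamp and write form
   one locked unit, so the output file is ordered by CLOCK_MONOTONIC time.
   Build with e.g. "cc -pthread sketch.c". */
#define _DEFAULT_SOURCE        /* may be needed for fwrite_unlocked() (glibc) */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static FILE *volatile log_file;          /* read once per write, may be NULL */

struct record_s { long tv_sec, tv_nsec; int thread_num, event; };

static void log_event(int thread_num, int event)
{
    struct record_s rec = { 0, 0, thread_num, event };
    FILE *f = log_file;                  /* local copy of the volatile ptr */
    if (f == NULL)
        return;

    flockfile(f);
    struct timespec t;
    clock_gettime(CLOCK_MONOTONIC, &t);  /* inside the lock: keeps order */
    rec.tv_sec = t.tv_sec;
    rec.tv_nsec = t.tv_nsec;
    size_t ok = fwrite_unlocked(&rec, sizeof(rec), 1, f);
    funlockfile(f);

    if (ok != 1)
        log_file = NULL;                 /* stop logging; leak f on purpose */
}

static void *worker(void *arg)
{
    for (int i = 0; i < 1000; i++)
        log_event((int)(long)arg, i & 7);
    return NULL;
}

int main(void)
{
    log_file = fopen("events.bin", "w");
    pthread_t a, b;
    pthread_create(&a, NULL, worker, (void *)1L);
    pthread_create(&b, NULL, worker, (void *)2L);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}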
diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c
--- a/rpython/translator/stm/src_stm/stm/sync.c
+++ b/rpython/translator/stm/src_stm/stm/sync.c
@@ -243,13 +243,13 @@
         /* No segment available.  Wait until release_thread_segment()
            signals that one segment has been freed.  Note that we prefer
            waiting rather than detaching an inevitable transaction, here. */
-        timing_event(tl, STM_WAIT_FREE_SEGMENT);
+        emit_wait(tl, STM_WAIT_FREE_SEGMENT);
         cond_wait(C_SEGMENT_FREE);
-        timing_event(tl, STM_WAIT_DONE);
         goto retry_from_start;
 
 got_num:
+    emit_wait_done(tl);
     OPT_ASSERT(num >= 0 && num < NB_SEGMENTS-1);
     sync_ctl.in_use1[num+1] = 1;
     assert(STM_SEGMENT->segment_num == num+1);
@@ -425,15 +425,15 @@
 #ifdef STM_TESTS
         abort_with_mutex();
 #endif
-        timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE);
+        EMIT_WAIT(STM_WAIT_SYNC_PAUSE);
         cond_signal(C_AT_SAFE_POINT);
         STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED;
         cond_wait(C_REQUEST_REMOVED);
         STM_PSEGMENT->safe_point = SP_RUNNING;
-        timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
         assert(!STM_SEGMENT->no_safe_point_here);
         dprintf(("left safe point\n"));
     }
+    EMIT_WAIT_DONE();
 }
 
 static void synchronize_all_threads(enum sync_type_e sync_type)
@@ -461,12 +461,14 @@
 
         intptr_t detached = fetch_detached_transaction();
         if (detached != 0) {
+            EMIT_WAIT_DONE();
             remove_requests_for_safe_point();    /* => C_REQUEST_REMOVED */
             s_mutex_unlock();
             commit_fetched_detached_transaction(detached);
             s_mutex_lock();
             goto restart;
         }
+        EMIT_WAIT(STM_WAIT_SYNCING);
         STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_AT_SAFE_POINT;
         cond_wait_timeout(C_AT_SAFE_POINT, 0.00001);
         /* every 10 microsec, try again fetch_detached_transaction() */
@@ -477,6 +479,7 @@
             abort_with_mutex();
         }
     }
+    EMIT_WAIT_DONE();
 
     if (UNLIKELY(sync_type == STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE)) {
         globally_unique_transaction = true;
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -71,6 +71,7 @@
     /* the next fields are handled internally by the library */
     int last_associated_segment_num;   /* always a valid seg num */
     int thread_local_counter;
+    int wait_event_emitted;
     struct stm_thread_local_s *prev, *next;
     intptr_t self_or_0_if_atomic;
     void *creating_pthread[2];
@@ -580,10 +581,6 @@
     STM_TRANSACTION_COMMIT,
     STM_TRANSACTION_ABORT,
 
-    /* write-read contention: a "marker" is included in the PYPYSTM file
-       saying where the write was done.  Followed by STM_TRANSACTION_ABORT. */
-    STM_CONTENTION_WRITE_READ,
-
     /* inevitable contention: all threads that try to become inevitable
        have a STM_BECOME_INEVITABLE event with a position marker.  Then,
        if it waits it gets a STM_WAIT_OTHER_INEVITABLE.  It is possible
@@ -591,8 +588,14 @@
        STM_TRANSACTION_ABORT if it fails to become inevitable. */
     STM_BECOME_INEVITABLE,
 
-    /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE */
+    /* write-read contention: a "marker" is included in the PYPYSTM file
+       saying where the write was done.  Followed by STM_TRANSACTION_ABORT. */
+    STM_CONTENTION_WRITE_READ,
+
+    /* always one STM_WAIT_xxx followed later by STM_WAIT_DONE or
+       possibly STM_TRANSACTION_ABORT */
     STM_WAIT_FREE_SEGMENT,
+    STM_WAIT_SYNCING,
     STM_WAIT_SYNC_PAUSE,
     STM_WAIT_OTHER_INEVITABLE,
     STM_WAIT_DONE,
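
The updated comment in stmgc.h relaxes the pairing rule for profiling
consumers: an STM_WAIT_xxx event is closed either by STM_WAIT_DONE or, if the
transaction aborts while waiting, by STM_TRANSACTION_ABORT.  A hypothetical
consumer of such an event stream could total up wait time as in the sketch
below; the event values and the sample trace are invented for illustration,
not a reader for the actual PYPYSTM file format:

/* Accumulate time spent waiting from a (timestamp, event) stream, closing an
   open STM_WAIT_xxx interval on either WAIT_DONE or TRANSACTION_ABORT. */
#include <stdio.h>

enum ev { TRANSACTION_ABORT, WAIT_FREE_SEGMENT, WAIT_SYNCING,
          WAIT_SYNC_PAUSE, WAIT_OTHER_INEVITABLE, WAIT_DONE };

struct sample { double t; enum ev event; };

int main(void)
{
    /* invented trace: one wait closed by WAIT_DONE, one closed by an abort */
    struct sample trace[] = {
        { 0.000, WAIT_OTHER_INEVITABLE }, { 0.004, WAIT_DONE },
        { 0.010, WAIT_FREE_SEGMENT },     { 0.011, TRANSACTION_ABORT },
    };
    double waiting_since = -1.0, total_wait = 0.0;
    for (size_t i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
        enum ev e = trace[i].event;
        if (e >= WAIT_FREE_SEGMENT && e <= WAIT_OTHER_INEVITABLE) {
            waiting_since = trace[i].t;          /* a wait opens here */
        }
        else if (e == WAIT_DONE || e == TRANSACTION_ABORT) {
            if (waiting_since >= 0.0) {          /* ...and closes here */
                total_wait += trace[i].t - waiting_since;
                waiting_since = -1.0;
            }
        }
    }
    printf("total time spent waiting: %.3f s\n", total_wait);
    return 0;
}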