Author: Remi Meier <[email protected]>
Branch: stmgc-c8
Changeset: r83520:79247965065e
Date: 2016-04-05 15:28 +0300
http://bitbucket.org/pypy/pypy/changeset/79247965065e/
Log: import stmgc
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-8c9162341945
+b7f8a106095f
diff --git a/rpython/translator/stm/src_stm/stm/atomic.h b/rpython/translator/stm/src_stm/stm/atomic.h
--- a/rpython/translator/stm/src_stm/stm/atomic.h
+++ b/rpython/translator/stm/src_stm/stm/atomic.h
@@ -24,16 +24,16 @@
#if defined(__i386__) || defined(__amd64__)
- static inline void spin_loop(void) { asm("pause" : : : "memory"); }
- static inline void write_fence(void) { asm("" : : : "memory"); }
+ static inline void stm_spin_loop(void) { asm("pause" : : : "memory"); }
+ static inline void stm_write_fence(void) { asm("" : : : "memory"); }
/*# define atomic_exchange(ptr, old, new) do { \
(old) = __sync_lock_test_and_set(ptr, new); \
} while (0)*/
#else
- static inline void spin_loop(void) { asm("" : : : "memory"); }
- static inline void write_fence(void) { __sync_synchronize(); }
+ static inline void stm_spin_loop(void) { asm("" : : : "memory"); }
+ static inline void stm_write_fence(void) { __sync_synchronize(); }
/*# define atomic_exchange(ptr, old, new) do { \
(old) = *(ptr); \
@@ -42,19 +42,19 @@
#endif
-static inline void _spinlock_acquire(uint8_t *plock) {
+static inline void _stm_spinlock_acquire(uint8_t *plock) {
retry:
if (__builtin_expect(__sync_lock_test_and_set(plock, 1) != 0, 0)) {
- spin_loop();
+ stm_spin_loop();
goto retry;
}
}
-static inline void _spinlock_release(uint8_t *plock) {
+static inline void _stm_spinlock_release(uint8_t *plock) {
assert(*plock == 1);
__sync_lock_release(plock);
}
-#define spinlock_acquire(lock) _spinlock_acquire(&(lock))
-#define spinlock_release(lock) _spinlock_release(&(lock))
+#define stm_spinlock_acquire(lock) _stm_spinlock_acquire(&(lock))
+#define stm_spinlock_release(lock) _stm_spinlock_release(&(lock))
#endif /* _STM_ATOMIC_H */
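For reference, a minimal sketch of a caller of the renamed helpers; the
'my_lock' variable, the function name and the include path are hypothetical,
not part of this changeset:

    #include <stdint.h>
    #include "stm/atomic.h"

    static uint8_t my_lock = 0;          /* 0 = free, 1 = held */

    static void with_lock(void)
    {
        stm_spinlock_acquire(my_lock);   /* test-and-set, spinning with
                                            stm_spin_loop() until free */
        /* ... exclusive section ... */
        stm_spinlock_release(my_lock);   /* asserts the lock is held, then
                                            __sync_lock_release() */
    }

The macros still take the lock by lvalue and pass its address to the inline
functions, so call sites change only in the stm_ prefix.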
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -1218,7 +1218,7 @@
/* but first, emit commit-event of this thread: */
timing_event(STM_SEGMENT->running_thread, STM_TRANSACTION_COMMIT);
STM_SEGMENT->running_thread = NULL;
- write_fence();
+ stm_write_fence();
assert(_stm_detached_inevitable_from_thread == -1);
_stm_detached_inevitable_from_thread = 0;
}
@@ -1540,7 +1540,7 @@
0. We have to wait for this to happen bc. otherwise, eg.
_stm_detach_inevitable_transaction is not safe to do yet */
while (_stm_detached_inevitable_from_thread == -1)
- spin_loop();
+ stm_spin_loop();
assert(_stm_detached_inevitable_from_thread == 0);
soon_finished_or_inevitable_thread_segment();
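Both spin sites here wait out the transient -1 state of
_stm_detached_inevitable_from_thread. Condensed, the pattern is (a sketch;
the slot is declared as intptr_t here as the comparison with -1 suggests,
the real declaration lives in stmgc.h):

    extern intptr_t _stm_detached_inevitable_from_thread;

    /* -1 means some thread is in the middle of attaching or fetching the
       detached inevitable transaction; wait for the slot to settle */
    while (_stm_detached_inevitable_from_thread == -1)
        stm_spin_loop();     /* "pause" on x86, compiler barrier elsewhere */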
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -333,12 +333,12 @@
static inline void acquire_privatization_lock(int segnum)
{
- spinlock_acquire(get_priv_segment(segnum)->privatization_lock);
+ stm_spinlock_acquire(get_priv_segment(segnum)->privatization_lock);
}
static inline void release_privatization_lock(int segnum)
{
- spinlock_release(get_priv_segment(segnum)->privatization_lock);
+ stm_spinlock_release(get_priv_segment(segnum)->privatization_lock);
}
static inline bool all_privatization_locks_acquired(void)
diff --git a/rpython/translator/stm/src_stm/stm/detach.c b/rpython/translator/stm/src_stm/stm/detach.c
--- a/rpython/translator/stm/src_stm/stm/detach.c
+++ b/rpython/translator/stm/src_stm/stm/detach.c
@@ -107,7 +107,7 @@
is reset to a value different from -1 */
dprintf(("reattach_transaction: busy wait...\n"));
while (_stm_detached_inevitable_from_thread == -1)
- spin_loop();
+ stm_spin_loop();
/* then retry */
goto restart;
@@ -157,7 +157,7 @@
/* busy-loop: wait until _stm_detached_inevitable_from_thread
is reset to a value different from -1 */
while (_stm_detached_inevitable_from_thread == -1)
- spin_loop();
+ stm_spin_loop();
goto restart;
}
if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
@@ -209,7 +209,7 @@
/* busy-loop: wait until _stm_detached_inevitable_from_thread
is reset to a value different from -1 */
while (_stm_detached_inevitable_from_thread == -1)
- spin_loop();
+ stm_spin_loop();
goto restart;
}
}
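All three hunks above share one shape: busy-wait on the transient -1, then
retry from the top. Combined with the compare-and-swap that claims the slot,
the pattern is roughly (a sketch with a hypothetical 'expected' value, not
the real function bodies):

 restart:
    if (_stm_detached_inevitable_from_thread == -1) {
        while (_stm_detached_inevitable_from_thread == -1)
            stm_spin_loop();           /* wait out the transient state */
        goto restart;
    }
    if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
                                      expected, -1))
        goto restart;                  /* lost the race: retry */
    /* slot owned: do the reattach work, then store the final value */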
diff --git a/rpython/translator/stm/src_stm/stm/finalizer.c b/rpython/translator/stm/src_stm/stm/finalizer.c
--- a/rpython/translator/stm/src_stm/stm/finalizer.c
+++ b/rpython/translator/stm/src_stm/stm/finalizer.c
@@ -30,7 +30,7 @@
{
/* move finalizer lists to g_finalizers for major collections */
while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) {
- spin_loop();
+ stm_spin_loop();
}
if (STM_PSEGMENT->finalizers->run_finalizers != NULL) {
@@ -515,7 +515,7 @@
while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) {
/* somebody is adding more finalizers (_commit_finalizer()) */
- spin_loop();
+ stm_spin_loop();
}
struct finalizers_s copy = g_finalizers;
assert(copy.running_next == NULL);
diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c
--- a/rpython/translator/stm/src_stm/stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/stm/gcpage.c
@@ -75,7 +75,7 @@
/* uncommon case: need to initialize some more pages */
- spinlock_acquire(lock_growth_large);
+ stm_spinlock_acquire(lock_growth_large);
char *start = uninitialized_page_start;
if (addr + size > start) {
@@ -99,7 +99,7 @@
((struct object_s*)addr)->stm_flags = 0;
- spinlock_release(lock_growth_large);
+ stm_spinlock_release(lock_growth_large);
return (stm_char*)(addr - stm_object_pages);
}
@@ -178,7 +178,7 @@
DEBUG_EXPECT_SEGFAULT(true);
release_privatization_lock(STM_SEGMENT->segment_num);
- write_fence(); /* make sure 'nobj' is fully initialized from
+ stm_write_fence(); /* make sure 'nobj' is fully initialized from
all threads here */
return (object_t *)nobj;
}
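The fence before returning 'nobj' is the usual initialize-then-publish
pattern: finish every store into the new object, fence, and only then let
other threads obtain a pointer to it. In miniature (hypothetical names, not
this file's code):

    static struct object_s *published;

    struct object_s *obj = allocate_somewhere(size);   /* hypothetical */
    obj->stm_flags = 0;                /* complete all initialization... */
    stm_write_fence();                 /* ...and order it before the
                                          publishing store below */
    published = obj;                   /* readers see a fully built object */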
diff --git a/rpython/translator/stm/src_stm/stm/hashtable.c b/rpython/translator/stm/src_stm/stm/hashtable.c
--- a/rpython/translator/stm/src_stm/stm/hashtable.c
+++ b/rpython/translator/stm/src_stm/stm/hashtable.c
@@ -216,7 +216,7 @@
}
biggertable->resize_counter = rc;
- write_fence(); /* make sure that 'biggertable' is valid here,
+ stm_write_fence(); /* make sure that 'biggertable' is valid here,
and make sure 'table->resize_counter' is updated
('table' must be immutable from now on). */
VOLATILE_HASHTABLE(hashtable)->table = biggertable;
@@ -278,7 +278,7 @@
just now. In both cases, this thread must simply spin loop.
*/
if (IS_EVEN(rc)) {
- spin_loop();
+ stm_spin_loop();
goto restart;
}
/* in the other cases, we need to grab the RESIZING_LOCK.
@@ -348,7 +348,7 @@
hashtable->additions++;
}
table->items[i] = entry;
- write_fence(); /* make sure 'table->items' is written here */
+ stm_write_fence(); /* make sure 'table->items' is written here */
VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */
stm_read((object_t*)entry);
return entry;
@@ -437,7 +437,7 @@
table = VOLATILE_HASHTABLE(hashtable)->table;
rc = VOLATILE_TABLE(table)->resize_counter;
if (IS_EVEN(rc)) {
- spin_loop();
+ stm_spin_loop();
goto restart;
}
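hashtable.c keeps its lock inside resize_counter: an even value means a
resize is in progress and the 'table' pointer may already be stale, so
readers spin and reload; an odd value means the table is stable. The lookup
side of that protocol, condensed (a sketch, not the real function):

 restart:
    table = VOLATILE_HASHTABLE(hashtable)->table;
    rc = VOLATILE_TABLE(table)->resize_counter;
    if (IS_EVEN(rc)) {                 /* resizing: 'table' may be stale */
        stm_spin_loop();
        goto restart;                  /* reload the table and counter */
    }
    /* rc is odd: 'table' is the current table and can be used */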
diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c
--- a/rpython/translator/stm/src_stm/stm/largemalloc.c
+++ b/rpython/translator/stm/src_stm/stm/largemalloc.c
@@ -116,12 +116,12 @@
static void lm_lock(void)
{
- spinlock_acquire(lm.lock);
+ stm_spinlock_acquire(lm.lock);
}
static void lm_unlock(void)
{
- spinlock_release(lm.lock);
+ stm_spinlock_release(lm.lock);
}
diff --git a/rpython/translator/stm/src_stm/stm/queue.c b/rpython/translator/stm/src_stm/stm/queue.c
--- a/rpython/translator/stm/src_stm/stm/queue.c
+++ b/rpython/translator/stm/src_stm/stm/queue.c
@@ -77,7 +77,7 @@
stm_queue_segment_t *seg = &queue->segs[i];
struct stm_priv_segment_info_s *pseg = get_priv_segment(i + 1);
- spinlock_acquire(pseg->active_queues_lock);
+ stm_spinlock_acquire(pseg->active_queues_lock);
if (seg->active) {
assert(pseg->active_queues != NULL);
@@ -91,7 +91,7 @@
assert(!seg->old_objects_popped);
}
- spinlock_release(pseg->active_queues_lock);
+ stm_spinlock_release(pseg->active_queues_lock);
queue_free_entries(seg->added_in_this_transaction);
queue_free_entries(seg->old_objects_popped);
@@ -102,12 +102,12 @@
static inline void queue_lock_acquire(void)
{
int num = STM_SEGMENT->segment_num;
- spinlock_acquire(get_priv_segment(num)->active_queues_lock);
+ stm_spinlock_acquire(get_priv_segment(num)->active_queues_lock);
}
static inline void queue_lock_release(void)
{
int num = STM_SEGMENT->segment_num;
- spinlock_release(get_priv_segment(num)->active_queues_lock);
+ stm_spinlock_release(get_priv_segment(num)->active_queues_lock);
}
static void queue_activate(stm_queue_t *queue, stm_queue_segment_t *seg)
@@ -133,7 +133,7 @@
#pragma push_macro("STM_SEGMENT")
#undef STM_PSEGMENT
#undef STM_SEGMENT
- spinlock_acquire(pseg->active_queues_lock);
+ stm_spinlock_acquire(pseg->active_queues_lock);
bool added_any_old_entries = false;
bool finished_more_tasks = false;
@@ -177,11 +177,11 @@
}
dprintf(("items move to old_entries in queue %p\n", queue));
- spinlock_acquire(queue->old_entries_lock);
+ stm_spinlock_acquire(queue->old_entries_lock);
old = queue->old_entries;
tail->next = old;
queue->old_entries = head;
- spinlock_release(queue->old_entries_lock);
+ stm_spinlock_release(queue->old_entries_lock);
added_any_old_entries = true;
}
@@ -196,7 +196,7 @@
tree_free(pseg->active_queues);
pseg->active_queues = NULL;
- spinlock_release(pseg->active_queues_lock);
+ stm_spinlock_release(pseg->active_queues_lock);
if (added_any_old_entries)
cond_broadcast(C_QUEUE_OLD_ENTRIES);
@@ -267,11 +267,11 @@
can free and reuse this entry. Then the compare_and_swap
succeeds, but the value written is outdated nonsense.
*/
- spinlock_acquire(queue->old_entries_lock);
+ stm_spinlock_acquire(queue->old_entries_lock);
entry = queue->old_entries;
if (entry != NULL)
queue->old_entries = entry->next;
- spinlock_release(queue->old_entries_lock);
+ stm_spinlock_release(queue->old_entries_lock);
if (entry != NULL) {
/* successfully popped the old 'entry'. It remains in the
diff --git a/rpython/translator/stm/src_stm/stm/smallmalloc.c b/rpython/translator/stm/src_stm/stm/smallmalloc.c
--- a/rpython/translator/stm/src_stm/stm/smallmalloc.c
+++ b/rpython/translator/stm/src_stm/stm/smallmalloc.c
@@ -53,7 +53,7 @@
lock of pages.c to prevent any remapping from occurring under our
feet.
*/
- spinlock_acquire(gmfp_lock);
+ stm_spinlock_acquire(gmfp_lock);
if (free_uniform_pages == NULL) {
@@ -90,7 +90,7 @@
}
}
- spinlock_release(gmfp_lock);
+ stm_spinlock_release(gmfp_lock);
return;
out_of_memory:
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -98,7 +98,7 @@
void _stm_commit_transaction(void);
void _stm_leave_noninevitable_transactional_zone(void);
#define _stm_detach_inevitable_transaction(tl) do { \
- write_fence(); \
+ stm_write_fence(); \
assert(_stm_detached_inevitable_from_thread == 0); \
if (stmcb_timing_event != NULL && tl->self_or_0_if_atomic != 0) \
{stmcb_timing_event(tl, STM_TRANSACTION_DETACH, NULL);} \