Author: Armin Rigo <[email protected]>
Branch:
Changeset: r64:87d10f56bb42
Date: 2013-06-05 12:53 +0200
http://bitbucket.org/pypy/stmgc/changeset/87d10f56bb42/
Log: in-progress
diff --git a/.hgignore b/.hgignore
new file mode 100644
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,3 @@
+syntax: glob
+*.pyc
+*~
diff --git a/c3/et.c b/c3/et.c
--- a/c3/et.c
+++ b/c3/et.c
@@ -18,21 +18,18 @@
die if we start more than 0x7fff threads. */
static revision_t next_locked_value = (LOCKED + 1) | 1;
-/* a negative odd number that uniquely identifies the currently running
- transaction (but the number in aborted transactions is reused).
- Because we don't know yet the value of 'global_cur_time' that we'll
- be assigned when we commit, we use the (negative of) the value of
- 'global_cur_time' when we committed the previous transaction. */
-__thread revision_t stm_local_revision;
+/* a negative odd number that identifies the currently running
+ transaction within the thread. */
+__thread revision_t stm_private_rev_num;
revision_t stm_global_cur_time(void) /* for tests */
{
return global_cur_time;
}
-revision_t stm_local_rev(void) /* for tests */
+revision_t get_private_rev_num(void) /* for tests */
{
- return stm_local_revision;
+ return stm_private_rev_num;
}
struct tx_descriptor *stm_thread_descriptor(void) /* for tests */
{
@@ -70,6 +67,8 @@
static gcptr HeadOfRevisionChainList(struct tx_descriptor *d, gcptr G)
{
+ abort();
+#if 0
gcptr R = G;
revision_t v;
@@ -135,10 +134,13 @@
goto retry; // restart searching from R
}
return R;
+#endif
}
static inline gcptr AddInReadSet(struct tx_descriptor *d, gcptr R)
{
+ abort();
+#if 0
fprintf(stderr, "AddInReadSet(%p)\n", R);
d->count_reads++;
if (!fxcache_add(&d->recent_reads_cache, R)) {
@@ -153,10 +155,13 @@
// return Localize(d, R);
// }
return R;
+#endif
}
gcptr stm_DirectReadBarrier(gcptr G)
{
+ abort();
+#if 0
gcptr R;
struct tx_descriptor *d = thread_descriptor;
assert(d->active >= 1);
@@ -174,19 +179,13 @@
G2L_FIND(d->public_to_private, R, entry, goto not_found);
L = entry->val;
assert(L->h_revision == stm_local_revision);
-#if 0
- if (R_Container && !(R_Container->h_tid & GCFLAG_GLOBAL))
- { /* R_Container is a local object */
- gcptr *ref = (gcptr *)(((char *)R_Container) + offset);
- *ref = L; /* fix in-place */
- }
-#endif
return L;
not_found:;
}
R = AddInReadSet(d, R);
return R;
+#endif
}
static gcptr _latest_gcptr(gcptr R)
@@ -312,6 +311,8 @@
static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R)
{
+ abort();
+#if 0
if (R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)
{
wlog_t *entry;
@@ -332,6 +333,7 @@
AddInReadSet(d, R);
/*mark*/
return L;
+#endif
}
gcptr stm_WriteBarrier(gcptr P)
@@ -376,6 +378,8 @@
static _Bool ValidateDuringTransaction(struct tx_descriptor *d,
_Bool during_commit)
{
+ abort();
+#if 0
long i, size = d->list_of_read_objects.size;
gcptr *items = d->list_of_read_objects.items;
@@ -416,6 +420,7 @@
}
}
return 1;
+#endif
}
static void ValidateNow(struct tx_descriptor *d)
@@ -638,8 +643,8 @@
goto retry;
gcptr L = item->val;
- assert(L->h_revision == stm_local_revision);
- assert(v != stm_local_revision);
+ assert(L->h_revision == stm_private_rev_num);
+ assert(v != stm_private_rev_num);
L->h_revision = v; /* store temporarily this value here */
} G2L_LOOP_END;
@@ -647,6 +652,8 @@
static void CancelLocks(struct tx_descriptor *d)
{
+ abort();
+#if 0
revision_t my_lock = d->my_lock;
wlog_t *item;
@@ -672,6 +679,7 @@
ACCESS_ONCE(R->h_revision) = v;
} G2L_LOOP_END;
+#endif
}
static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER;
@@ -820,15 +828,15 @@
"*************************************\n",
(long)cur_time);
- revision_t localrev = stm_local_revision;
+ revision_t localrev = stm_private_rev_num;
UpdateProtectedChainHeads(d, cur_time, localrev);
smp_wmb();
revision_t newrev = -(cur_time + 1);
assert(newrev & 1);
- ACCESS_ONCE(stm_local_revision) = newrev;
+ ACCESS_ONCE(stm_private_rev_num) = newrev;
fprintf(stderr, "%p: stm_local_revision = %ld\n", d, (long)newrev);
- assert(d->local_revision_ref = &stm_local_revision);
+ assert(d->private_revision_ref = &stm_private_rev_num);
UpdateChainHeads(d, cur_time, localrev);
@@ -1027,8 +1035,8 @@
}
assert(d->my_lock & 1);
assert(d->my_lock > LOCKED);
- stm_local_revision = -d->my_lock; /* a unique negative odd value */
- d->local_revision_ref = &stm_local_revision;
+ stm_private_rev_num = -1;
+ d->private_revision_ref = &stm_private_rev_num;
d->max_aborts = -1;
thread_descriptor = d;
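
The rename above reflects the simplified model: each thread keeps a thread-local negative odd number, stm_private_rev_num, and an object is private exactly when its h_revision equals that value; at thread start the number is now simply -1 instead of being derived from the lock value. A minimal standalone sketch of the resulting predicate, with illustrative stand-ins for the real stmgc types:

    #include <assert.h>
    #include <stdint.h>

    typedef intptr_t revision_t;

    /* illustrative stand-in for the real stmgc object header */
    typedef struct { revision_t h_tid, h_revision; } object_t;

    static __thread revision_t stm_private_rev_num = -1;

    /* an object is private iff its h_revision matches the running
       thread's private revision number (always a negative odd value) */
    static inline int is_private(object_t *obj)
    {
        assert(stm_private_rev_num < 0 && (stm_private_rev_num & 1));
        return obj->h_revision == stm_private_rev_num;
    }

    int main(void)
    {
        object_t fresh = { 0, stm_private_rev_num };  /* e.g. just allocated */
        assert(is_private(&fresh));
        return 0;
    }
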
diff --git a/c3/et.h b/c3/et.h
--- a/c3/et.h
+++ b/c3/et.h
@@ -1,7 +1,7 @@
/*** Extendable Timestamps
*
* Documentation:
- * https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/stm2012/stmimpl.rst
+ * doc-*.txt
*
* This is very indirectly based on rstm_r5/stm/et.hpp.
* See http://www.cs.rochester.edu/research/synchronization/rstm/api.shtml
@@ -20,35 +20,28 @@
* collection: "young" objects are the ones in the nursery (plus a few big
* ones outside) and will be collected by the following minor collection.
*
- * Additionally, objects are either "public", "protected" or "private". The
- * private objects have h_revision == stm_local_revision and are invisible
- * to other threads. They become non-private when the transaction commits.
- *
- * non-private | private
- * +------------------------------------------------------------
- * |
- * old | public objects | old private objects
- * ---------|
- * |
- * young | [ protected objects | private objects (--> grows) ]
- * (nursery)|
+ * Additionally, objects are either "public", "protected" or "private".
*
* GCFLAG_OLD is set on old objects.
*
* GCFLAG_VISITED is used temporarily during major collections.
*
+ * GCFLAG_PUBLIC is set on public objects.
+ *
+ * GCFLAG_BACKUP_COPY means the object is a (protected) backup copy.
+ *
* GCFLAG_PUBLIC_TO_PRIVATE is added to a *public* object that has got a
* *private* copy. It is sticky, reset only at the next major collection.
*
* GCFLAG_PREBUILT_ORIGINAL is only set on the original version of
* prebuilt objects.
*
- * GCFLAG_WRITE_BARRIER is set on *old* *private* objects to track old-to-
- * young pointers. It may be left set on *public* objects but is ignored
- * there, because the write barrier will trigger anyway on any non-private
- * object. On an old private object, it is removed once a write occurs
- * and the object is recorded in 'private_old_pointing_to_young'; it is
- * set again at the next minor collection.
+ * GCFLAG_WRITE_BARRIER is set on *old* objects to track old-to-young
+ * pointers. It may be left set on *public* objects but is ignored
+ * there, because public objects are read-only. The flag is removed
+ * once a write occurs and the object is recorded in the list
+ * 'old_pointing_to_young'; it is set again at the next minor
+ * collection.
*
* GCFLAG_NURSERY_MOVED is used temporarily during minor collections.
*
@@ -56,26 +49,31 @@
* have been stolen.
*
* GCFLAG_STUB is used for debugging: it's set on stub objects made by
- * create_yo_stubs()
+ * stealing or by major collections.
*/
#define GCFLAG_OLD (STM_FIRST_GCFLAG << 0)
#define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1)
-#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 2)
+#define GCFLAG_PUBLIC (STM_FIRST_GCFLAG << 2)
#define GCFLAG_PREBUILT_ORIGINAL (STM_FIRST_GCFLAG << 3)
-#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 4)
-#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 5)
-#define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 6)
-#define GCFLAG_STUB (STM_FIRST_GCFLAG << 7) /* debugging */
+#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 4)
+#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 5)
+#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 6)
+#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 7)
+#define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 8)
+#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* debugging */
/* this value must be reflected in PREBUILT_FLAGS in stmgc.h */
#define GCFLAG_PREBUILT (GCFLAG_VISITED | \
GCFLAG_PREBUILT_ORIGINAL | \
- GCFLAG_OLD)
+ GCFLAG_OLD | \
+ GCFLAG_PUBLIC)
#define GC_FLAG_NAMES { "OLD", \
"VISITED", \
+ "PUBLIC", \
+ "PREBUILT_ORIGINAL", \
+ "BACKUP_COPY", \
"PUBLIC_TO_PRIVATE", \
- "PREBUILT_ORIGINAL", \
"WRITE_BARRIER", \
"NURSERY_MOVED", \
"STOLEN", \
@@ -121,12 +119,12 @@
char *longest_abort_info;
long long longest_abort_info_time;
struct FXCache recent_reads_cache;
- revision_t *local_revision_ref;
+ revision_t *private_revision_ref;
struct tx_descriptor *tx_next, *tx_prev; /* a doubly linked list */
};
extern __thread struct tx_descriptor *thread_descriptor;
-extern __thread revision_t stm_local_revision;
+extern __thread revision_t stm_private_rev_num;
/************************************************************/
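
With GCFLAG_PUBLIC inserted at bit 2 and GCFLAG_BACKUP_COPY at bit 4, GCFLAG_PREBUILT now covers bits 0-3 (OLD, VISITED, PUBLIC, PREBUILT_ORIGINAL), which is what the stmgc.h hunk further below encodes as STM_FIRST_GCFLAG * (1 + 2 + 4 + 8). A self-contained consistency check, using an illustrative STM_FIRST_GCFLAG value (the real one is derived from STM_SIZE_OF_USER_TID):

    #include <assert.h>

    #define STM_FIRST_GCFLAG  (1L << 16)   /* illustrative value only */

    #define GCFLAG_OLD                (STM_FIRST_GCFLAG << 0)
    #define GCFLAG_VISITED            (STM_FIRST_GCFLAG << 1)
    #define GCFLAG_PUBLIC             (STM_FIRST_GCFLAG << 2)
    #define GCFLAG_PREBUILT_ORIGINAL  (STM_FIRST_GCFLAG << 3)

    #define GCFLAG_PREBUILT  (GCFLAG_VISITED |           \
                              GCFLAG_PREBUILT_ORIGINAL | \
                              GCFLAG_OLD |               \
                              GCFLAG_PUBLIC)
    #define PREBUILT_FLAGS   (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8))

    int main(void)
    {
        /* the two definitions must describe the same bit pattern */
        assert(GCFLAG_PREBUILT == PREBUILT_FLAGS);
        return 0;
    }
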
diff --git a/c3/gcpage.c b/c3/gcpage.c
--- a/c3/gcpage.c
+++ b/c3/gcpage.c
@@ -423,7 +423,7 @@
assert(stmgc_classify(item->addr) == K_PUBLIC);
/*..rt(stmgc_classify(item->val) == K_PRIVATE); but in the
other thread, which becomes: */
- assert(item->val->h_revision == *d->local_revision_ref);
+ assert(item->val->h_revision == *d->private_revision_ref);
item->addr->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE;
@@ -559,8 +559,8 @@
{
struct tx_descriptor *d;
struct tx_descriptor *saved = thread_descriptor;
- revision_t saved_local_rev = stm_local_revision;
- assert(saved_local_rev == *saved->local_revision_ref);
+ revision_t saved_private_rev = stm_private_rev_num;
+ assert(saved_private_rev == *saved->private_revision_ref);
for (d = tx_head; d; d = d->tx_next) {
/* Force a minor collection to run in the thread 'd'.
@@ -572,12 +572,12 @@
/* Hack: temporarily pretend that we "are" the other thread...
*/
thread_descriptor = d;
- stm_local_revision = *d->local_revision_ref;
+ stm_private_rev_num = *d->private_revision_ref;
assert(stmgc_nursery_hiding(d, 0));
stmgc_minor_collect_no_abort();
assert(stmgc_nursery_hiding(d, 1));
thread_descriptor = saved;
- stm_local_revision = saved_local_rev;
+ stm_private_rev_num = saved_private_rev;
}
}
}
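
The gcpage.c hunks are mostly the rename, but they depend on the invariant that the running thread's stm_private_rev_num always mirrors *d->private_revision_ref; the "pretend to be the other thread" hack therefore saves and restores both variables around the foreign minor collection. A compact sketch of that save/swap/restore pattern, with the collection itself reduced to a placeholder:

    #include <stdint.h>

    typedef intptr_t revision_t;

    struct tx_descriptor {
        revision_t *private_revision_ref;
        struct tx_descriptor *tx_next;
        /* ...other fields omitted... */
    };

    static __thread struct tx_descriptor *thread_descriptor;
    static __thread revision_t stm_private_rev_num;
    static struct tx_descriptor *tx_head;

    static void minor_collect_in(struct tx_descriptor *d)  /* placeholder */
    {
        (void)d;
    }

    static void force_minor_collections(void)
    {
        struct tx_descriptor *saved = thread_descriptor;
        revision_t saved_private_rev = stm_private_rev_num;
        struct tx_descriptor *d;

        for (d = tx_head; d; d = d->tx_next) {
            /* temporarily impersonate thread 'd'... */
            thread_descriptor = d;
            stm_private_rev_num = *d->private_revision_ref;
            minor_collect_in(d);
            /* ...then restore our own identity */
            thread_descriptor = saved;
            stm_private_rev_num = saved_private_rev;
        }
    }

    int main(void) { force_minor_collections(); return 0; }
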
diff --git a/c3/lists.c b/c3/lists.c
--- a/c3/lists.c
+++ b/c3/lists.c
@@ -223,15 +223,10 @@
/************************************************************/
-void fxcache_clear(struct FXCache *fxcache)
+void _fxcache_reset(struct FXCache *fxcache)
{
- fxcache->shift += 4;
- /* FX_ENTRIES+1 entries are needed */
- if (fxcache->shift + FX_ENTRIES + 1 > FX_TOTAL) {
+ fxcache->shift = 0;
memset(fxcache->cache, 0, sizeof(fxcache->cache));
- fxcache->shift = 0;
- }
- fxcache->cache_start = (char *)(fxcache->cache + fxcache->shift);
}
/************************************************************/
diff --git a/c3/lists.h b/c3/lists.h
--- a/c3/lists.h
+++ b/c3/lists.h
@@ -168,16 +168,12 @@
/* The fxcache_xx functions implement a fixed-size set of gcptr's.
Moreover the gcptr's in the set are mapped to small integers. In case
- of collisions, old items are discarded. The cache uses 3-way caching,
- stored in 3 consecutive entries, but the 3 entries are in "cache lines"
- that are only aligned to a multiple of 2. This means that among the 3
- items, the item 0 overlaps with the item 2 of the previous cache line,
- and the item 2 overlaps with the item 0 of the following cache line.
- The item 1 can only be seen by the current cache line.
+ of collisions, old items are discarded. The cache doesn't use
+ multi-way caching for now.
- The cache itself uses a total of FX_ENTRIES+1 entries in the 'cache'
- array below, starting at 'cache_start'. The reason it is bigger than
- necessary is that fxcache_clear() simply shifts 'cache_start', making
+ The cache itself uses a total of FX_ENTRIES entries in the 'cache'
+ array below, starting at 'shift'. The reason it is bigger than
+ necessary is that fxcache_clear() simply increments 'shift', making
any previous entries invalid by not being in the correct position any
more.
*/
@@ -187,41 +183,18 @@
struct FXCache {
char *cache_start;
- revision_t nextadd;
revision_t shift;
revision_t cache[FX_TOTAL];
};
-void fxcache_clear(struct FXCache *fxcache);
+void _fxcache_reset(struct FXCache *fxcache);
-static inline int fxcache_add(struct FXCache *fxcache, gcptr item) {
- /* If 'item' is not in the cache, add it and returns 0.
- If it is already, return 1.
- */
- revision_t uitem = (revision_t)item;
- /* 'entry' points to 'cache_start[mask of uitem, even-valued]' */
- revision_t *entry = (revision_t *)
- (fxcache->cache_start + (uitem & ((FX_ENTRIES-2) * sizeof(revision_t))));
- revision_t current;
-
- current = entry[1]; /* first look here, the cache-private entry */
- if (current == uitem)
- return 1;
-
- if (entry[0] == uitem) {
- entry[0] = current; /* move from this collidable entry to */
- entry[1] = uitem; /* the cache-private entry */
- return 1;
- }
- if (entry[2] == uitem) {
- entry[2] = current; /* move from this collidable entry to */
- entry[1] = uitem; /* the cache-private entry */
- return 1;
- }
-
- entry[fxcache->nextadd] = uitem;
- fxcache->nextadd ^= 2;
- return 0;
+static inline void fxcache_clear(struct FXCache *fxcache)
+{
+ fxcache->shift++;
+ if (fxcache->shift > FX_TOTAL - FX_ENTRIES)
+ _fxcache_reset(fxcache);
+ fxcache->cache_start = (char *)(fxcache->cache + fxcache->shift);
}
/************************************************************/
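
fxcache_clear() no longer manages 3-way cache lines: it just bumps 'shift', which slides 'cache_start' forward so that entries recorded before the clear no longer sit at the slot where they would be looked up, and it only falls back to a full memset (via _fxcache_reset()) once 'shift' would run past FX_TOTAL - FX_ENTRIES. A minimal sketch of the idea; the fxcache_add shown here is a hypothetical direct-mapped version, since the new fxcache_add is not part of this changeset:

    #include <stdint.h>
    #include <string.h>

    #define FX_ENTRIES  64                    /* illustrative sizes */
    #define FX_TOTAL    (FX_ENTRIES * 16)

    typedef intptr_t revision_t;
    typedef struct object_s *gcptr;           /* opaque, as in stmgc */

    struct FXCache {
        char *cache_start;
        revision_t shift;
        revision_t cache[FX_TOTAL];
    };

    static void _fxcache_reset(struct FXCache *fxcache)
    {
        fxcache->shift = 0;
        memset(fxcache->cache, 0, sizeof(fxcache->cache));
    }

    static inline void fxcache_clear(struct FXCache *fxcache)
    {
        /* invalidate by sliding the window; wrap around with a full reset */
        fxcache->shift++;
        if (fxcache->shift > FX_TOTAL - FX_ENTRIES)
            _fxcache_reset(fxcache);
        fxcache->cache_start = (char *)(fxcache->cache + fxcache->shift);
    }

    /* hypothetical single-entry add: returns 1 if 'item' is already
       cached at its slot, 0 after recording it there */
    static inline int fxcache_add(struct FXCache *fxcache, gcptr item)
    {
        revision_t uitem = (revision_t)item;
        revision_t *entry = (revision_t *)
            (fxcache->cache_start +
             (uitem & ((FX_ENTRIES - 1) * sizeof(revision_t))));
        if (*entry == uitem)
            return 1;
        *entry = uitem;
        return 0;
    }
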
diff --git a/c3/nursery.c b/c3/nursery.c
--- a/c3/nursery.c
+++ b/c3/nursery.c
@@ -29,7 +29,7 @@
enum protection_class_t stmgc_classify(gcptr obj)
{
/* note that this function never returns K_OLD_PRIVATE. */
- if (obj->h_revision == stm_local_revision)
+ if (obj->h_revision == stm_private_rev_num)
return K_PRIVATE;
if (is_young(obj))
return K_PROTECTED;
@@ -42,7 +42,7 @@
/* for assertions only; moreover this function returns K_PRIVATE
only for young private objects, and K_OLD_PRIVATE for old ones. */
struct tx_descriptor *d = thread_descriptor;
- int private = (obj->h_revision == stm_local_revision);
+ int private = (obj->h_revision == stm_private_rev_num);
enum protection_class_t e;
if (is_in_nursery(d, obj)) {
@@ -128,7 +128,7 @@
{
gcptr p = stmgcpage_malloc(size);
memset(p, 0, size);
- p->h_revision = stm_local_revision;
+ p->h_revision = stm_private_rev_num;
p->h_tid = GCFLAG_OLD;
return p;
}
@@ -146,7 +146,7 @@
}
stm_dbgmem_used_again(cur, size, 1);
gcptr p = (gcptr)cur;
- p->h_revision = stm_local_revision;
+ p->h_revision = stm_private_rev_num;
return p;
}
@@ -185,7 +185,7 @@
GCFLAG_PREBUILT_ORIGINAL |
GCFLAG_WRITE_BARRIER |
GCFLAG_OLD);
- localobj->h_revision = stm_local_revision;
+ localobj->h_revision = stm_private_rev_num;
return localobj;
}
@@ -413,7 +413,7 @@
/* nb. don't use stmgc_classify() here, because some objects trigger
an assert at this point: young non-nursery objects which just
grew the flag GCFLAG_OLD */
- assert(obj->h_revision != stm_local_revision); /* not a private object */
+ assert(obj->h_revision != stm_private_rev_num); /* not a private object */
PATCH_ROOT_WITH(obj);
goto retry;
}
@@ -490,7 +490,7 @@
/* then we record the dependency in the dictionary
'public_to_private' */
- assert(L->h_revision == stm_local_revision);
+ assert(L->h_revision == stm_private_rev_num);
g2l_insert(&d->public_to_private, R, L);
/*mark*/
}
@@ -956,6 +956,8 @@
static gcptr extract_from_foreign_nursery(struct tx_descriptor *source_d,
gcptr R)
{
+ abort();
+#if 0
/* "Stealing": this function follows a chain of protected objects in
the foreign nursery of the thread 'source_d'. It copies the last
one outside the nursery, and return it. */
@@ -1011,6 +1013,7 @@
source_d->stolen_objects.size - 1);
return N;
+#endif
}
void stmgc_public_to_foreign_protected(gcptr P)
@@ -1112,7 +1115,7 @@
/* we re-insert L as a private copy of the public object N */
N->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE;
- assert(L->h_revision == stm_local_revision);
+ assert(L->h_revision == stm_private_rev_num);
g2l_insert(&d->public_to_private, N, L);
gcptrlist_insert(&d->public_to_young, N);
}
diff --git a/c3/stmgc.h b/c3/stmgc.h
--- a/c3/stmgc.h
+++ b/c3/stmgc.h
@@ -21,7 +21,7 @@
#define STM_SIZE_OF_USER_TID (sizeof(revision_t) / 2) /* in bytes */
#define STM_FIRST_GCFLAG (1L << (8 * STM_SIZE_OF_USER_TID))
#define STM_USER_TID_MASK (STM_FIRST_GCFLAG - 1)
-#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 8))
+#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8))
#define PREBUILT_REVISION 1
diff --git a/c3/stmsync.c b/c3/stmsync.c
--- a/c3/stmsync.c
+++ b/c3/stmsync.c
@@ -78,8 +78,11 @@
gcptr stm_read_barrier(gcptr obj)
{
/* XXX inline in the caller */
+ abort();
+#if 0
if (UNLIKELY(obj->h_revision != stm_local_revision))
obj = stm_DirectReadBarrier(obj);
+#endif
return obj;
}
@@ -87,7 +90,7 @@
{
/* XXX inline in the caller */
if (UNLIKELY(((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) |
- (obj->h_revision != stm_local_revision)))
+ (obj->h_revision != stm_private_rev_num)))
obj = stm_WriteBarrier(obj);
return obj;
}
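
The read-barrier fast path is disabled for now (abort()/#if 0), while the write-barrier fast path keeps its combined test: take the slow path when GCFLAG_WRITE_BARRIER is still set or when the object is not a private copy of the current transaction. A standalone sketch of that predicate, with illustrative type, flag and UNLIKELY definitions:

    #include <stdint.h>

    typedef intptr_t revision_t;
    typedef struct { revision_t h_tid, h_revision; } object_t;

    #define STM_FIRST_GCFLAG      ((revision_t)1 << 16)   /* illustrative */
    #define GCFLAG_WRITE_BARRIER  (STM_FIRST_GCFLAG << 6)
    #define UNLIKELY(test)        __builtin_expect(!!(test), 0)

    static __thread revision_t stm_private_rev_num = -1;

    /* nonzero when stm_WriteBarrier() must be called: either the object
       still carries GCFLAG_WRITE_BARRIER, or it is not a private copy
       belonging to the current transaction */
    static inline int needs_write_barrier_slow_path(object_t *obj)
    {
        return UNLIKELY(((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) |
                        (obj->h_revision != stm_private_rev_num));
    }
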
diff --git a/c3/test/support.py b/c3/test/support.py
--- a/c3/test/support.py
+++ b/c3/test/support.py
@@ -80,7 +80,7 @@
void rawsetlong(gcptr, long, long);
gcptr pseudoprebuilt(size_t size, int tid);
- revision_t get_local_revision(void);
+ revision_t get_private_rev_num(void);
revision_t get_start_time(void);
gcptr *addr_of_thread_local(void);
@@ -112,6 +112,7 @@
extern revision_t stm_global_cur_time(void);
extern void stmgcpage_add_prebuilt_root(gcptr);
extern void stm_clear_between_tests(void);
+ extern revision_t get_private_rev_num(void);
int gettid(gcptr obj)
{
@@ -194,11 +195,6 @@
return x;
}
- revision_t get_local_revision(void)
- {
- return stm_local_revision;
- }
-
revision_t get_start_time(void)
{
return thread_descriptor->start_time;
@@ -421,7 +417,8 @@
def nalloc(size):
"Allocate a fresh object from the nursery"
p = lib.stm_allocate(size, 42 + size)
- assert p.h_revision == lib.get_local_revision()
+ assert p.h_tid == 42 + size # no GC flags
+ assert p.h_revision == lib.get_private_rev_num()
return p
def nalloc_refs(nrefs):
@@ -442,6 +439,7 @@
p = lib.pseudoprebuilt(HDR + WORD * nrefs, 421 + nrefs)
return p
+gettid = lib.gettid
setptr = lib.setptr
getptr = lib.getptr
rawsetptr = lib.rawsetptr
diff --git a/c3/test/test_et.py b/c3/test/test_et.py
new file mode 100644
--- /dev/null
+++ b/c3/test/test_et.py
@@ -0,0 +1,64 @@
+import py
+from support import *
+
+
+def setup_function(f):
+ lib.stm_clear_between_tests()
+ lib.stm_initialize_tests(getattr(f, 'max_aborts', 0))
+
+def teardown_function(_):
+ lib.stm_finalize()
+
+
+def test_freshly_created():
+ p = nalloc(HDR)
+ r = lib.get_private_rev_num()
+ assert r < 0 and r % 2 == 1
+ assert p.h_revision == r
+ assert p.h_tid == lib.gettid(p) | 0 # no GC flags
+
+def test_write_barrier_private():
+ p = nalloc(HDR)
+ assert lib.stm_write_barrier(p) == p
+ assert p.h_revision == lib.get_private_rev_num()
+ assert p.h_tid == lib.gettid(p) | 0 # no GC flags
+
+def test_protected_no_backup():
+ p = nalloc(HDR)
+ r = lib.get_private_rev_num()
+ assert p.h_revision == r
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ r2 = lib.get_private_rev_num()
+ assert r2 < 0 and r2 % 2 == 1
+ assert r != r2
+ assert p.h_revision == r
+ assert p.h_tid == lib.gettid(p) | 0 # no GC flags
+
+def test_private_with_backup():
+ p = nalloc(HDR)
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ r2 = lib.get_private_rev_num()
+ assert p.h_revision != r2
+ p2 = lib.stm_write_barrier(p)
+ assert p2 == p # does not move
+ assert p.h_revision == r2
+
+def test_get_backup_copy():
+ p = nalloc(HDR + WORD)
+ lib.setlong(p, 0, 78927812)
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
+ org_r = p.h_revision
+ lib.setlong(p, 0, 927122)
+ assert p.h_revision == lib.get_private_rev_num()
+ pback = lib.stm_get_backup_copy(p)
+ assert pback and pback != p
+ assert pback.h_revision == org_r
+ assert pback.h_tid == p.h_tid | GCFLAG_BACKUP_COPY
+ assert lib.rawgetlong(pback, 0) == 78927812
+ assert lib.rawgetlong(p, 0) == 927122
+
+def test_protected_with_backup():
+ xxx