Author: Armin Rigo <[email protected]>
Branch:
Changeset: r84:d2ad521c9ac9
Date: 2013-06-09 16:07 +0200
http://bitbucket.org/pypy/stmgc/changeset/d2ad521c9ac9/
Log: in-progress, maybe? messy :-(
diff --git a/c4/et.c b/c4/et.c
--- a/c4/et.c
+++ b/c4/et.c
@@ -66,6 +66,13 @@
gcptr P = G;
revision_t v;
+ if (UNLIKELY(d->public_descriptor->stolen_objects.size > 0))
+ {
+ spinlock_acquire(d->public_descriptor->collection_lock, 'N');
+ stm_normalize_stolen_objects(d->public_descriptor);
+ spinlock_release(d->public_descriptor->collection_lock);
+ }
+
if (P->h_tid & GCFLAG_PUBLIC)
{
/* follow the chained list of h_revision's as long as they are
@@ -144,7 +151,7 @@
if (foreign_pd == d->public_descriptor)
{
/* same thread */
- P = (gcptr)v;
+ P = (gcptr)(v - 2);
assert(!(P->h_tid & GCFLAG_PUBLIC));
if (P->h_revision == stm_private_rev_num)
{
@@ -306,7 +313,14 @@
memcpy(B + 1, P + 1, size - sizeof(*B));
}
assert(B->h_tid & GCFLAG_BACKUP_COPY);
- gcptrlist_insert2(&d->public_descriptor->active_backup_copies, P, B);
+
+ gcptrlist_locked_insert2(&d->public_descriptor->active_backup_copies, P, B,
+ &d->public_descriptor->collection_lock);
+
+ smp_wmb(); /* guarantees that stm_steal_stub() will see the list
+ up to the (P, B) pair in case it goes the path
+ h_revision == *foreign_pd->private_revision_ref */
+
P->h_revision = stm_private_rev_num;
return P;
}
@@ -328,7 +342,6 @@
not_found:;
gcptr L = stmgc_duplicate(R);
assert(!(L->h_tid & GCFLAG_BACKUP_COPY));
- assert(!(L->h_tid & GCFLAG_STOLEN));
assert(!(L->h_tid & GCFLAG_STUB));
L->h_tid &= ~(GCFLAG_OLD |
GCFLAG_VISITED |
@@ -367,16 +380,19 @@
return W;
}
-gcptr stm_get_backup_copy(gcptr P)
+gcptr stm_get_backup_copy(long index)
{
- assert(P->h_revision == stm_private_rev_num);
+ struct tx_public_descriptor *pd = thread_descriptor->public_descriptor;
+ if (index < gcptrlist_size(&pd->active_backup_copies))
+ return pd->active_backup_copies.items[index];
+ return NULL;
+}
+gcptr stm_get_stolen_obj(long index)
+{
struct tx_public_descriptor *pd = thread_descriptor->public_descriptor;
- long i, size = pd->active_backup_copies.size;
- gcptr *items = pd->active_backup_copies.items;
- for (i = 0; i < size; i += 2)
- if (items[i] == P)
- return items[i + 1];
+ if (index < gcptrlist_size(&pd->stolen_objects))
+ return pd->stolen_objects.items[index];
return NULL;
}
@@ -549,7 +565,9 @@
gcptrlist_clear(&d->list_of_read_objects);
gcptrlist_clear(&d->public_descriptor->active_backup_copies);
- abort();//stmgc_abort_transaction(d);
+ abort();
+ d->public_descriptor->stolen_objects;//XXX clean up
+ //stmgc_abort_transaction(d);
fprintf(stderr,
"\n"
@@ -612,7 +630,6 @@
d->start_real_time.tv_nsec = -1;
}
assert(d->list_of_read_objects.size == 0);
- assert(d->public_descriptor->active_backup_copies.size == 0);
assert(!g2l_any_entry(&d->public_to_private));
d->count_reads = 1;
@@ -724,7 +741,6 @@
assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE));
assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL));
assert(!(L->h_tid & GCFLAG_NURSERY_MOVED));
- assert(!(L->h_tid & GCFLAG_STOLEN));
assert(L->h_revision != localrev); /* modified by AcquireLocks() */
#ifdef DUMP_EXTRA
@@ -751,7 +767,6 @@
assert(R->h_tid & GCFLAG_PUBLIC);
assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE);
assert(!(R->h_tid & GCFLAG_NURSERY_MOVED));
- assert(!(R->h_tid & GCFLAG_STOLEN));
assert(R->h_revision != localrev);
#ifdef DUMP_EXTRA
@@ -801,19 +816,21 @@
void TurnPrivateWithBackupToProtected(struct tx_descriptor *d,
revision_t cur_time)
{
- long i, size = d->public_descriptor->active_backup_copies.size;
- gcptr *items = d->public_descriptor->active_backup_copies.items;
+ struct tx_public_descriptor *pd = d->public_descriptor;
+ long i, size = pd->active_backup_copies.size;
+ gcptr *items = pd->active_backup_copies.items;
for (i = 0; i < size; i += 2)
{
gcptr P = items[i];
gcptr B = items[i + 1];
+ assert(B->h_tid & GCFLAG_BACKUP_COPY);
+ assert(!(B->h_tid & GCFLAG_PUBLIC));
assert(P->h_revision == stm_private_rev_num);
- assert(B->h_tid & GCFLAG_BACKUP_COPY);
B->h_revision = cur_time;
P->h_revision = (revision_t)B;
};
- gcptrlist_clear(&d->public_descriptor->active_backup_copies);
+ gcptrlist_clear(&pd->active_backup_copies);
}
void CommitTransaction(void)
@@ -823,6 +840,9 @@
assert(d->active >= 1);
spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/
+ if (d->public_descriptor->stolen_objects.size)
+ stm_normalize_stolen_objects(d->public_descriptor);
+
AcquireLocks(d);
if (is_inevitable(d))
@@ -879,7 +899,7 @@
assert(newrev & 1);
ACCESS_ONCE(stm_private_rev_num) = newrev;
fprintf(stderr, "%p: stm_local_revision = %ld\n", d, (long)newrev);
- assert(d->private_revision_ref = &stm_private_rev_num);
+ assert(d->public_descriptor->private_revision_ref = &stm_private_rev_num);
UpdateChainHeads(d, cur_time, localrev);
@@ -1097,7 +1117,7 @@
assert(d->my_lock & 1);
assert(d->my_lock >= LOCKED);
stm_private_rev_num = -1;
- d->private_revision_ref = &stm_private_rev_num;
+ pd->private_revision_ref = &stm_private_rev_num;
d->max_aborts = -1;
thread_descriptor = d;
@@ -1113,12 +1133,15 @@
void DescriptorDone(void)
{
+ static revision_t no_private_revision = 8;
revision_t i;
struct tx_descriptor *d = thread_descriptor;
assert(d != NULL);
assert(d->active == 0);
- d->public_descriptor->collection_lock = 0; /* unlock */
+ spinlock_acquire(d->public_descriptor->collection_lock, 'D'); /*done*/
+ d->public_descriptor->private_revision_ref = &no_private_revision;
+ spinlock_release(d->public_descriptor->collection_lock);
spinlock_acquire(descriptor_array_lock, 1);
i = d->public_descriptor_index;
diff --git a/c4/et.h b/c4/et.h
--- a/c4/et.h
+++ b/c4/et.h
@@ -31,6 +31,7 @@
* GCFLAG_PUBLIC is set on public objects.
*
* GCFLAG_BACKUP_COPY means the object is a (protected) backup copy.
+ * For debugging.
*
* GCFLAG_PUBLIC_TO_PRIVATE is added to a *public* object that has got a
* *private* copy. It is sticky, reset only at the next major collection.
@@ -47,9 +48,6 @@
*
* GCFLAG_NURSERY_MOVED is used temporarily during minor collections.
*
- * GCFLAG_STOLEN is set of protected objects after we notice that they
- * have been stolen.
- *
* GCFLAG_STUB is set for debugging on stub objects made by stealing or
* by major collections. 'p_stub->h_revision' might be a value
* that is == 2 (mod 4): in this case they point to a protected/private
@@ -59,12 +57,11 @@
#define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1)
#define GCFLAG_PUBLIC (STM_FIRST_GCFLAG << 2)
#define GCFLAG_PREBUILT_ORIGINAL (STM_FIRST_GCFLAG << 3)
-#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 4)
-#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 5)
-#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 6)
-#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 7)
-#define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 8)
-#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* debugging */
+#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 4)
+#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 5)
+#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 6)
+#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 7) /* debugging */
+#define GCFLAG_STUB (STM_FIRST_GCFLAG << 8) /* debugging */
/* this value must be reflected in PREBUILT_FLAGS in stmgc.h */
#define GCFLAG_PREBUILT (GCFLAG_VISITED | \
@@ -108,8 +105,9 @@
revision_t collection_lock;
struct stub_block_s *stub_blocks;
gcptr stub_free_list;
- struct GcPtrList stolen_objects;
- struct GcPtrList active_backup_copies;
+ struct GcPtrList active_backup_copies; /* (P,B) where P=private, B=backup */
+ struct GcPtrList stolen_objects; /* (P,Q) where P=priv/prot, Q=public */
+ revision_t *private_revision_ref;
revision_t free_list_next;
/* xxx gcpage data here */
};
@@ -142,7 +140,6 @@
char *longest_abort_info;
long long longest_abort_info_time;
struct FXCache recent_reads_cache;
- revision_t *private_revision_ref;
};
extern __thread struct tx_descriptor *thread_descriptor;
@@ -165,7 +162,8 @@
gcptr stm_RepeatReadBarrier(gcptr);
gcptr stm_WriteBarrier(gcptr);
gcptr _stm_nonrecord_barrier(gcptr, int *);
-gcptr stm_get_backup_copy(gcptr);
+gcptr stm_get_backup_copy(long); /* debugging */
+gcptr stm_get_stolen_obj(long); /* debugging */
gcptr stm_get_read_obj(long); /* debugging */
gcptr stmgc_duplicate(gcptr);
diff --git a/c4/lists.c b/c4/lists.c
--- a/c4/lists.c
+++ b/c4/lists.c
@@ -171,6 +171,23 @@
gcptrlist->size = i + 2;
}
+void gcptrlist_locked_insert2(struct GcPtrList *gcptrlist, gcptr newitem1,
+ gcptr newitem2, revision_t *lock)
+{
+ gcptr *items;
+ long i = gcptrlist->size;
+ if (UNLIKELY((gcptrlist->alloc - i) < 2))
+ {
+ spinlock_acquire(*lock, 'I');
+ _gcptrlist_grow(gcptrlist);
+ spinlock_release(*lock);
+ }
+ items = gcptrlist->items;
+ items[i+0] = newitem1;
+ items[i+1] = newitem2;
+ gcptrlist->size = i + 2;
+}
+
void gcptrlist_insert3(struct GcPtrList *gcptrlist, gcptr newitem1,
gcptr newitem2, gcptr newitem3)
{
diff --git a/c4/lists.h b/c4/lists.h
--- a/c4/lists.h
+++ b/c4/lists.h
@@ -164,6 +164,9 @@
void gcptrlist_merge(struct GcPtrList *, struct GcPtrList *gcptrlist_source);
void gcptrlist_move(struct GcPtrList *, struct GcPtrList *gcptrlist_source);
+void gcptrlist_locked_insert2(struct GcPtrList *gcptrlist, gcptr newitem1,
+ gcptr newitem2, revision_t *lock);
+
/************************************************************/
/* The fxcache_xx functions implement a fixed-size set of gcptr's.
diff --git a/c4/steal.c b/c4/steal.c
--- a/c4/steal.c
+++ b/c4/steal.c
@@ -53,9 +53,29 @@
if ((v & 3) != 2)
goto done; /* un-stubbed while we waited for the lock */
- gcptr L = (gcptr)(v - 2);
- gcptr Q = stmgc_duplicate(L);
+ gcptr Q, L = (gcptr)(v - 2);
+ revision_t w = ACCESS_ONCE(L->h_revision);
+
+ if (w == *foreign_pd->private_revision_ref) {
+ /* The stub points to a private object L. Because it cannot point
+ to "really private" objects, it must mean that L used to be
a protected object, and it has an attached backup copy.
+ XXX find a way to optimize this search, maybe */
+ long i;
+ gcptr *items = foreign_pd->active_backup_copies.items;
+ /* we must find L as the first item of a pair in the list. We
+ cannot rely on how big the list is here, but we know that
+ it will not be resized while we hold collection_lock. */
+ for (i = 0; items[i] != L; i += 2)
+ ;
+ L = items[i + 1];
+ assert(L->h_tid & GCFLAG_BACKUP_COPY);
+ }
+ /* duplicate L */
+ Q = stmgc_duplicate(L); XXX RACE
+ Q->h_tid &= ~GCFLAG_BACKUP_COPY;
Q->h_tid |= GCFLAG_PUBLIC;
+ gcptrlist_insert2(&foreign_pd->stolen_objects, L, Q);
smp_wmb();
@@ -64,3 +84,16 @@
done:
spinlock_release(foreign_pd->collection_lock);
}
+
+void stm_normalize_stolen_objects(struct tx_public_descriptor *pd)
+{
+ long i, size = pd->stolen_objects.size;
+ gcptr *items = pd->stolen_objects.items;
+ for (i = 0; i < size; i += 2) {
+ gcptr L = items[i];
+ gcptr Q = items[i + 1];
+ if (L->h_revision == stm_private_rev_num) {
+
+ }
+ }
+}
diff --git a/c4/steal.h b/c4/steal.h
--- a/c4/steal.h
+++ b/c4/steal.h
@@ -9,6 +9,8 @@
gcptr stm_stub_malloc(struct tx_public_descriptor *);
void stm_steal_stub(gcptr);
+gcptr stm_get_stolen_obj(long index); /* debugging */
+void stm_normalize_stolen_objects(struct tx_public_descriptor *);
#endif
diff --git a/c4/test/support.py b/c4/test/support.py
--- a/c4/test/support.py
+++ b/c4/test/support.py
@@ -68,7 +68,8 @@
void stm_start_sharedlock(void);
void stm_stop_sharedlock(void);
void AbortTransaction(int);
- gcptr stm_get_backup_copy(gcptr);
+ gcptr stm_get_backup_copy(long index);
+ gcptr stm_get_stolen_obj(long index);
gcptr stm_get_read_obj(long index);
void *STUB_THREAD(gcptr);
@@ -103,7 +104,6 @@
#define GCFLAG_PUBLIC_TO_PRIVATE ...
#define GCFLAG_WRITE_BARRIER ...
#define GCFLAG_NURSERY_MOVED ...
- #define GCFLAG_STOLEN ...
#define GCFLAG_STUB ...
#define ABRT_MANUAL ...
//typedef struct { ...; } page_header_t;
@@ -552,4 +552,23 @@
index += 1
return result
+def _list2dict(getter):
+ result = {}
+ index = 0
+ while 1:
+ p = getter(index)
+ if p == ffi.NULL:
+ break
+ q = getter(index + 1)
+ assert q != ffi.NULL
+ result[p] = q
+ index += 2
+ return result
+
+def backup_copies():
+ return _list2dict(lib.stm_get_backup_copy)
+
+def stolen_objs():
+ return _list2dict(lib.stm_get_stolen_obj)
+
stub_thread = lib.STUB_THREAD
diff --git a/c4/test/test_et.py b/c4/test/test_et.py
--- a/c4/test/test_et.py
+++ b/c4/test/test_et.py
@@ -60,7 +60,7 @@
org_r = p.h_revision
lib.setlong(p, 0, 927122)
assert p.h_revision == lib.get_private_rev_num()
- pback = lib.stm_get_backup_copy(p)
+ pback = backup_copies()[p]
assert pback and pback != p
assert pback.h_revision == org_r
assert pback.h_tid == p.h_tid | GCFLAG_BACKUP_COPY
@@ -75,7 +75,7 @@
lib.stm_commit_transaction()
lib.stm_begin_inevitable_transaction()
lib.setlong(p, 0, 927122)
- pback = lib.stm_get_backup_copy(p)
+ pback = backup_copies()[p]
assert pback != p
assert p.h_revision == lib.get_private_rev_num()
lib.stm_commit_transaction()
@@ -90,7 +90,7 @@
lib.stm_commit_transaction()
lib.stm_begin_inevitable_transaction()
lib.setlong(p, 0, 927122)
- pback = lib.stm_get_backup_copy(p)
+ pback = backup_copies()[p]
assert pback != p
lib.stm_commit_transaction()
lib.stm_begin_inevitable_transaction()
@@ -100,7 +100,7 @@
assert lib.rawgetlong(pback, 0) == 78927812 # but should not be used
lib.setlong(p, 0, 43891)
assert p.h_revision == lib.get_private_rev_num()
- assert pback == lib.stm_get_backup_copy(p)
+ assert pback == backup_copies()[p]
assert lib.rawgetlong(p, 0) == 43891
assert lib.rawgetlong(pback, 0) == 927122
@@ -239,14 +239,16 @@
lib.stm_begin_inevitable_transaction()
assert classify(p) == "public"
assert classify(p1) == "protected"
- plist.append(p1)
- # now p's most recent revision is protected
+ plist.append(p1) # now p's most recent revision is protected
assert classify(ffi.cast("gcptr", p.h_revision)) == "stub"
r.set(2)
r.wait(3)
- assert lib.list_stolen_objects() == plist[-2:]
- p2 = lib.stm_read_barrier(p1)
- assert p2 == plist[-1]
+ d = stolen_objs()
+ assert len(d) == 1
+ assert d.keys() == [p1]
+ [p2] = d.values()
+ assert lib.stm_read_barrier(p) == p2
+ assert lib.stm_read_barrier(p1) == p2
def f2(r):
r.wait(2)
p2 = lib.stm_read_barrier(p) # steals
@@ -255,6 +257,7 @@
assert p.h_revision == int(ffi.cast("revision_t", p2))
assert p2 == lib.stm_read_barrier(p)
assert p2 not in plist
+ assert classify(p2) == "public"
plist.append(p2)
r.set(3)
run_parallel(f1, f2)
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit