It's a cool thing, but it has a few problems. - It wants to know statically how many vcores there are (max, at least). - It doesn't pad its dissem structure properly (it adds 64 extra bytes, but as a plain array member rather than as actual padding). - It doesn't handle preemption.
All of these can be fixed, if we actually want the barriers. In that case, we can bring this code back and fix up the above three things. Signed-off-by: Barret Rhoden <[email protected]> --- tests/mhello.c | 7 ------- user/parlib/include/parlib/mcs.h | 19 ----------------- user/parlib/mcs.c | 44 ---------------------------------------- 3 files changed, 70 deletions(-) diff --git a/tests/mhello.c b/tests/mhello.c index fe894be1d40d..b6f0abf84928 100644 --- a/tests/mhello.c +++ b/tests/mhello.c @@ -14,8 +14,6 @@ #include <parlib/event.h> #include <parlib/uthread.h> -mcs_barrier_t b; - __thread int temp; void *core0_tls = 0; @@ -42,9 +40,6 @@ int main(int argc, char** argv) uint32_t vcoreid; int retval; - /* Initialize our barrier. */ - mcs_barrier_init(&b, max_vcores()); - /* vcore_context test */ assert(!in_vcore_context()); @@ -125,7 +120,6 @@ int main(int argc, char** argv) } printf("Vcore %d Done!\n", vcoreid); - //mcs_barrier_wait(&b,vcore_id()); printf("All Cores Done!\n", vcoreid); while(1); // manually kill from the monitor @@ -181,7 +175,6 @@ void ghetto_vcore_entry(void) #endif vcore_request(1); - //mcs_barrier_wait(&b,vcore_id()); udelay(vcoreid * 10000000); //exit(0); while(1); diff --git a/user/parlib/include/parlib/mcs.h b/user/parlib/include/parlib/mcs.h index 570c569b7c27..ca521b56257f 100644 --- a/user/parlib/include/parlib/mcs.h +++ b/user/parlib/include/parlib/mcs.h @@ -19,25 +19,6 @@ typedef struct mcs_lock mcs_lock_qnode_t* lock; } mcs_lock_t; -typedef struct -{ - volatile int myflags[2][LOG2_MAX_VCORES]; - volatile int* partnerflags[2][LOG2_MAX_VCORES]; - int parity; - int sense; - char pad[ARCH_CL_SIZE]; -} mcs_dissem_flags_t; - -typedef struct -{ - size_t nprocs; - mcs_dissem_flags_t* allnodes; - size_t logp; -} mcs_barrier_t; - -int mcs_barrier_init(mcs_barrier_t* b, size_t nprocs); -void mcs_barrier_wait(mcs_barrier_t* b, size_t vcoreid); - void mcs_lock_init(struct mcs_lock *lock); /* Caller needs to alloc (and zero) their own qnode to spin 
on. The memory * should be on a cacheline that is 'per-thread'. This could be on the stack, diff --git a/user/parlib/mcs.c b/user/parlib/mcs.c index 9c5a93459108..a2bea6744835 100644 --- a/user/parlib/mcs.c +++ b/user/parlib/mcs.c @@ -120,50 +120,6 @@ void mcs_unlock_notifsafe(struct mcs_lock *lock, struct mcs_lock_qnode *qnode) uth_enable_notifs(); } -// MCS dissemination barrier! -int mcs_barrier_init(mcs_barrier_t* b, size_t np) -{ - if(np > max_vcores()) - return -1; - b->allnodes = (mcs_dissem_flags_t*)malloc(np*sizeof(mcs_dissem_flags_t)); - memset(b->allnodes,0,np*sizeof(mcs_dissem_flags_t)); - b->nprocs = np; - - b->logp = (np & (np-1)) != 0; - while(np >>= 1) - b->logp++; - - size_t i,k; - for(i = 0; i < b->nprocs; i++) - { - b->allnodes[i].parity = 0; - b->allnodes[i].sense = 1; - - for(k = 0; k < b->logp; k++) - { - size_t j = (i+(1<<k)) % b->nprocs; - b->allnodes[i].partnerflags[0][k] = &b->allnodes[j].myflags[0][k]; - b->allnodes[i].partnerflags[1][k] = &b->allnodes[j].myflags[1][k]; - } - } - - return 0; -} - -void mcs_barrier_wait(mcs_barrier_t* b, size_t pid) -{ - mcs_dissem_flags_t* localflags = &b->allnodes[pid]; - size_t i; - for(i = 0; i < b->logp; i++) - { - *localflags->partnerflags[localflags->parity][i] = localflags->sense; - while(localflags->myflags[localflags->parity][i] != localflags->sense); - } - if(localflags->parity) - localflags->sense = 1-localflags->sense; - localflags->parity = 1-localflags->parity; -} - /* Preemption detection and recovering MCS locks. */ /* Old style. Has trouble getting out of 'preempt/change-to storms' under * heavy contention and with preemption. */ -- 2.7.0.rc3.207.g0ac5344 -- You received this message because you are subscribed to the Google Groups "Akaros" group. To unsubscribe from this group and stop receiving emails from it, send an email to [email protected]. To post to this group, send email to [email protected]. For more options, visit https://groups.google.com/d/optout.
