Changeset: 5ced52da5b76 for MonetDB
URL: http://dev.monetdb.org/hg/MonetDB?cmd=changeset;node=5ced52da5b76
Modified Files:
monetdb5/mal/mal_interpreter.mx
monetdb5/optimizer/opt_mitosis.mx
Branch: default
Log Message:
Revert some changes
Experiments on sf100 show that performance is not easily influenced
by piece sizes, nor by prioritizing the instructions using their
footprint claim. The ultimate scheduler/mitosis is a study in its own right.
diffs (97 lines):
diff --git a/monetdb5/mal/mal_interpreter.mx b/monetdb5/mal/mal_interpreter.mx
--- a/monetdb5/mal/mal_interpreter.mx
+++ b/monetdb5/mal/mal_interpreter.mx
@@ -711,6 +711,7 @@
/* optimistically set memory */
if ( argclaim == 0)
return 0;
+ return 0; /* invalidate */
mal_set_lock(mal_contextLock, "DFLOWdelay");
if (memorypool <= 0 && memoryclaims == 0) {
@@ -843,6 +844,28 @@
return r;
}
+/* it makes sense to give priority to those
+ * instructions that carry a lot of temporary arguments
+ * It will reduce the footprint of the database.
+*/
+static void
+queue_sort(queue *q)
+{
+ int i, j;
+ void *f;
+
+ for( i =0; i< q->last; i++)
+ for( j=i+1; j<q->last; j++)
+ if( ((FlowStatus)q->data[i])->argclaim >
((FlowStatus)q->data[j])->argclaim){
+ f= q->data[i];
+ q->data[i]= q->data[j];
+ q->data[j]= f;
+ }
+ /* decay, because it is likely flushed */
+ for( i =0; i< q->last; i++)
+ ((FlowStatus)q->data[i])->argclaim /= 2;
+}
+
@-
We simply move an instruction into the front of the queue.
Beware, we assume that variables are assigned a value once, otherwise
@@ -1037,8 +1060,8 @@
task->flow->status[i].blocks == 1) {
task->flow->status[i].state = DFLOWrunning;
task->flow->status[i].blocks = 0;
- task->flow->status[i].hotclaim = fs->hotclaim/p->retc;
- task->flow->status[i].argclaim += fs->hotclaim/p->retc;
+ task->flow->status[i].hotclaim = fs->hotclaim;
+ task->flow->status[i].argclaim += fs->hotclaim;
task->flow->status[i].error = NULL;
nxtfs = task->flow->status + i;
PARDEBUG
@@ -1246,11 +1269,9 @@
for( ; (i = flow->nodes[last]) > 0; last= flow->edges[last])
if (flow->status[i].state == DFLOWpending)
{
- flow->status[i].argclaim += f->hotclaim/ p->retc;
flow->status[i].blocks--;
if ( flow->status[i].blocks == 0 && ret == MAL_SUCCEED)
{
queued++;
- flow->status[i].hotclaim = f->hotclaim;
q_enqueue_(flow->todo, flow->status + i);
flow->status[i].state = DFLOWrunning;
PARDEBUG
@@ -1265,6 +1286,8 @@
queued++;
oldq++;
}
+ if (0 && oldq !=queued) /* invalidate */
+ queue_sort(flow->todo);
MT_lock_unset(&flow->todo->l, "q_enqueue");
if ( ret == MAL_SUCCEED)
diff --git a/monetdb5/optimizer/opt_mitosis.mx
b/monetdb5/optimizer/opt_mitosis.mx
--- a/monetdb5/optimizer/opt_mitosis.mx
+++ b/monetdb5/optimizer/opt_mitosis.mx
@@ -98,7 +98,7 @@
oid slice;
wrd r = 0, rowcnt=0; /* table should be sizeable to consider
parallel execution*/
InstrPtr q,*old, target= 0, matq;
- size_t typewidth= sizeof(lng) + sizeof(oid); /* a common result size */
+ size_t typewidth= 3 *sizeof(lng); /* 2 arguments and a result */
int threads = GDKnr_threads ? GDKnr_threads:1;
ValRecord vr;
VarPtr loc,rows;
@@ -152,10 +152,10 @@
if ( (i = OPTtarantulaAdviceInternal(mb,stk,p)) > 0 )
pieces = i;
else {
- /* ensure that GDKnr_threads result partition fit into main
memory */
+ /* ensure that GDKnr_threads partitions fit into main memory */
r = (BUN) (monet_memory / typewidth / threads);
if (rowcnt > r)
- pieces = (int) MAX ( ((rowcnt / r )? rowcnt/r:1) ,
(wrd) threads );
+ pieces = (int) MAX ( (rowcnt / r + 1) , (wrd) threads );
else
/* exploit parallelism, but ensure minimal partition size to
limit overhead */
if (rowcnt > MINPARTCNT)
_______________________________________________
Checkin-list mailing list
[email protected]
http://mail.monetdb.org/mailman/listinfo/checkin-list