Changeset: 8889968add15 for MonetDB
URL: https://dev.monetdb.org/hg/MonetDB/rev/8889968add15
Modified Files:
gdk/gdk.h
Branch: default
Log Message:
Merge with Sep2022 branch.
diffs (truncated from 937 to 300 lines):
diff --git a/gdk/gdk_firstn.c b/gdk/gdk_firstn.c
--- a/gdk/gdk_firstn.c
+++ b/gdk/gdk_firstn.c
@@ -94,11 +94,11 @@
/* we inherit LT and GT from gdk_calc_private.h */
-#define nLTbte(a, b) (!is_bte_nil(b) && (is_bte_nil(a) || (a) < (b)))
-#define nLTsht(a, b) (!is_sht_nil(b) && (is_sht_nil(a) || (a) < (b)))
-#define nLTint(a, b) (!is_int_nil(b) && (is_int_nil(a) || (a) < (b)))
-#define nLTlng(a, b) (!is_lng_nil(b) && (is_lng_nil(a) || (a) < (b)))
-#define nLThge(a, b) (!is_hge_nil(b) && (is_hge_nil(a) || (a) < (b)))
+#define nLTbte(a, b) (!is_bte_nil(a) && (is_bte_nil(b) || (a) < (b)))
+#define nLTsht(a, b) (!is_sht_nil(a) && (is_sht_nil(b) || (a) < (b)))
+#define nLTint(a, b) (!is_int_nil(a) && (is_int_nil(b) || (a) < (b)))
+#define nLTlng(a, b) (!is_lng_nil(a) && (is_lng_nil(b) || (a) < (b)))
+#define nLThge(a, b) (!is_hge_nil(a) && (is_hge_nil(b) || (a) < (b)))
#define nGTbte(a, b) (!is_bte_nil(b) && (is_bte_nil(a) || (a) > (b)))
#define nGTsht(a, b) (!is_sht_nil(b) && (is_sht_nil(a) || (a) > (b)))
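
A note on the gdk_firstn.c hunk above: the fix swaps which operand each nil test guards, so under the corrected nLT* macros a nil is never "less than" anything while every non-nil value is "less than" nil, i.e. nils order last (matching the new NULLS LAST tests further down). A minimal standalone sketch of the int variant, assuming INT_MIN stands in for gdk.h's int_nil; the my_* names are illustrative only:

#include <assert.h>
#include <limits.h>

#define my_int_nil INT_MIN                  /* assumed stand-in for gdk.h's int_nil */
#define my_is_int_nil(v) ((v) == my_int_nil)
/* corrected nLTint, copied from the + side of the hunk above */
#define nLTint(a, b) (!my_is_int_nil(a) && (my_is_int_nil(b) || (a) < (b)))

int
main(void)
{
	assert(nLTint(1, 2));                    /* ordinary comparison */
	assert(nLTint(1, my_int_nil));           /* non-nil sorts before nil */
	assert(!nLTint(my_int_nil, 1));          /* nil is never less than ... */
	assert(!nLTint(my_int_nil, my_int_nil)); /* ... anything, itself included */
	return 0;
}
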
diff --git a/monetdb5/optimizer/opt_mitosis.c b/monetdb5/optimizer/opt_mitosis.c
--- a/monetdb5/optimizer/opt_mitosis.c
+++ b/monetdb5/optimizer/opt_mitosis.c
@@ -11,18 +11,21 @@
#include "mal_interpreter.h"
#include "gdk_utils.h"
+#define MIN_PART_SIZE 100000 /* minimal record count per partition */
+#define MAX_PARTS2THREADS_RATIO 4 /* There should be at most this multiple more of partitions then threads */
+
str
OPTmitosisImplementation(Client cntxt, MalBlkPtr mb, MalStkPtr stk, InstrPtr pci)
{
int i, j, limit, slimit, estimate = 0, pieces = 1, mito_parts = 0,
mito_size = 0, row_size = 0, mt = -1, nr_cols = 0,
- nr_aggrs = 0;
+ nr_aggrs = 0, nr_maps = 0;
str schema = 0, table = 0;
BUN r = 0, rowcnt = 0; /* table should be sizeable to consider parallel execution*/
InstrPtr p, q, *old, target = 0;
size_t argsize = 6 * sizeof(lng), m = 0, memclaim;
/* estimate size per operator estimate: 4 args + 2 res*/
- int threads = GDKnr_threads ? GDKnr_threads : 1, maxslices = MAXSLICES;
+ int threads = GDKnr_threads ? GDKnr_threads : 1, maxparts = MAXSLICES;
str msg = MAL_SUCCEED;
/* if the user has associated limitation on the number of threads, respect it in the
@@ -50,9 +53,10 @@ OPTmitosisImplementation(Client cntxt, M
/* mitosis/mergetable bailout conditions */
/* Crude protection against self join explosion */
if (p->retc == 2 && isMatJoinOp(p))
- maxslices = threads;
+ maxparts = threads;
nr_aggrs += (p->argc > 2 && getModuleId(p) == aggrRef);
+ nr_maps += (isMapOp(p));
if (p->argc > 2 && getModuleId(p) == aggrRef &&
getFunctionId(p) != subcountRef &&
@@ -158,21 +162,21 @@ OPTmitosisImplementation(Client cntxt, M
*/
if (pieces <= 1){
/* improve memory usage estimation */
- if (nr_cols > 1 || nr_aggrs > 1)
- argsize = (nr_cols + nr_aggrs) * sizeof(lng);
+ if (nr_cols > 1 || nr_aggrs > 1 || nr_maps > 1)
+ argsize = (nr_cols + nr_aggrs + nr_maps) * sizeof(lng);
/* We haven't assigned the number of pieces.
* Determine the memory available for this client
*/
/* respect the memory limit size set for the user
- * and determine the column slice size
+ * and determine the column part size
*/
if( cntxt->memorylimit)
m = (((size_t) cntxt->memorylimit) * 1048576) / argsize;
else {
memclaim= MCmemoryClaim();
if(memclaim == GDK_mem_maxsize){
- m = GDK_mem_maxsize / (size_t) MCactiveClients() / argsize;
+ m = GDK_mem_maxsize / (size_t) MCactiveClients() / argsize;
} else
m = (GDK_mem_maxsize - memclaim) / argsize;
}
@@ -186,17 +190,17 @@ OPTmitosisImplementation(Client cntxt, M
* i.e., (threads*(rowcnt/pieces) <= m),
* i.e., (rowcnt/pieces <= m/threads),
* i.e., (pieces => rowcnt/(m/threads))
- * (assuming that (m > threads*MINPARTCNT)) */
+ * (assuming that (m > threads*MIN_PART_SIZE)) */
/* the number of pieces affects SF-100, going beyond 8x increases
* the optimizer costs beyond the execution time
*/
- pieces = ((int) ceil((double)rowcnt / m / threads));
+ pieces = ((int) ceil((double)rowcnt / (m / threads)));
if (pieces <= threads)
pieces = threads;
- } else if (rowcnt > MINPARTCNT) {
+ } else if (rowcnt > MIN_PART_SIZE) {
/* exploit parallelism, but ensure minimal partition size to
* limit overhead */
- pieces = MIN((int) ceil((double)rowcnt / MINPARTCNT), threads);
+ pieces = MIN((int) ceil((double)rowcnt / MIN_PART_SIZE), MAX_PARTS2THREADS_RATIO * threads);
}
}
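
Two things change in the hunk above. The old expression rowcnt / m / threads groups as (rowcnt / m) / threads, i.e. rowcnt / (m * threads), whereas the comment derives pieces >= rowcnt / (m / threads); the added parentheses make the code match that derivation, so threads * (rowcnt / pieces) stays within m. The second branch also now allows up to MAX_PARTS2THREADS_RATIO * threads partitions instead of at most threads. A standalone illustration with made-up numbers (rowcnt, m and threads below are placeholders, not values taken from MonetDB):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double rowcnt = 40e6;   /* rows in the table (made up) */
	double m = 8e6;         /* rows that fit in the memory budget (made up) */
	int threads = 4;

	int old_pieces = (int) ceil(rowcnt / m / threads);    /* = 2: 4 threads * 20e6 rows >> m */
	int new_pieces = (int) ceil(rowcnt / (m / threads));  /* = 20: 4 threads * 2e6 rows == m */
	printf("old: %d pieces, new: %d pieces\n", old_pieces, new_pieces);
	return 0;
}
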
@@ -206,10 +210,10 @@ OPTmitosisImplementation(Client cntxt, M
if (pieces < threads)
pieces = (int) MIN((BUN) threads, rowcnt);
/* prevent plan explosion */
- if (pieces > maxslices)
- pieces = maxslices;
+ if (pieces > maxparts)
+ pieces = maxparts;
/* to enable experimentation we introduce the option to set
- * the number of parts required and/or the size of each chunk (in K)
+ * the number of partitions required and/or the size of each chunk (in K)
*/
mito_parts = GDKgetenv_int("mito_parts", 0);
if (mito_parts > 0)
diff --git a/monetdb5/optimizer/opt_mitosis.h b/monetdb5/optimizer/opt_mitosis.h
--- a/monetdb5/optimizer/opt_mitosis.h
+++ b/monetdb5/optimizer/opt_mitosis.h
@@ -12,7 +12,6 @@
#include "opt_support.h"
#define MAXSLICES 1024 /* to be refined */
-#define MINPARTCNT 100000 /* minimal record count per partition */
extern str OPTmitosisImplementation(Client cntxt, MalBlkPtr mb, MalStkPtr stk, InstrPtr p);
diff --git a/monetdb5/optimizer/opt_reorder.c b/monetdb5/optimizer/opt_reorder.c
--- a/monetdb5/optimizer/opt_reorder.c
+++ b/monetdb5/optimizer/opt_reorder.c
@@ -32,6 +32,8 @@
#include "opt_mitosis.h"
+#define MAXSLICES 1024 /* to be refined */
+
/* Insert the instruction immediately after a previous instruction that
* generated an argument needed.
* If non can be found, add it to the end.
diff --git a/sql/storage/objectset.c b/sql/storage/objectset.c
--- a/sql/storage/objectset.c
+++ b/sql/storage/objectset.c
@@ -599,7 +599,7 @@ tc_gc_objectversion(sql_store store, sql
assert(!change->handled);
objectversion *ov = (objectversion*)change->data;
- if (oldest && oldest >= TRANSACTION_ID_BASE)
+ if (oldest >= TRANSACTION_ID_BASE)
return 0;
int res = os_cleanup( (sqlstore*) store, ov, oldest);
change->handled = (res)?true:false;
diff --git a/sql/storage/store.c b/sql/storage/store.c
--- a/sql/storage/store.c
+++ b/sql/storage/store.c
@@ -2235,7 +2235,7 @@ id_hash_clear_older(sql_hash *h, ulng ol
if (h->entries == 0)
return;
for (int i = 0; i < h->size; i++) {
- sql_hash_e *e = h->buckets[i], *c = NULL, *first = NULL;
+ sql_hash_e *e = h->buckets[i], *last = NULL, *first = NULL;
while (e) {
sql_hash_e *next = e->chain;
@@ -2246,16 +2246,16 @@ id_hash_clear_older(sql_hash *h, ulng ol
_DELETE(e);
h->entries--;
} else {
- if (c)
- c->chain = e;
+ if (last)
+ last->chain = e;
else
first = e;
- c = e;
+ last = e;
}
e = next;
}
- if (c)
- c->chain = NULL;
+ if (last)
+ last->chain = NULL;
h->buckets[i] = first;
}
}
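
The rename in id_hash_clear_older (c to last) spells out the role of the pointer: it tracks the last surviving entry of a bucket chain while expired entries are unlinked and freed, and first becomes the new bucket head. A generic standalone sketch of that singly-linked-list pruning pattern, using made-up node/prune_older names rather than the sql_hash_e code itself:

#include <stdio.h>
#include <stdlib.h>

/* illustrative only; not the MonetDB sql_hash_e code */
struct node {
	int ts;              /* pseudo timestamp */
	struct node *chain;  /* next entry in the bucket */
};

/* drop every node with ts < oldest, return the new chain head */
static struct node *
prune_older(struct node *head, int oldest)
{
	struct node *first = NULL, *last = NULL;

	for (struct node *e = head; e; ) {
		struct node *next = e->chain;
		if (e->ts < oldest) {
			free(e);                 /* expired: unlink and free */
		} else {
			if (last)
				last->chain = e; /* relink after the previous survivor */
			else
				first = e;       /* first survivor becomes the new head */
			last = e;
		}
		e = next;
	}
	if (last)
		last->chain = NULL;              /* terminate the surviving chain */
	return first;
}

int
main(void)
{
	struct node *head = NULL;
	for (int ts = 1; ts <= 5; ts++) {        /* build the chain 5 -> 4 -> 3 -> 2 -> 1 */
		struct node *n = malloc(sizeof *n);
		n->ts = ts;
		n->chain = head;
		head = n;
	}
	head = prune_older(head, 3);             /* keep only ts >= 3 */
	for (struct node *e = head; e; e = e->chain)
		printf("%d ", e->ts);            /* prints: 5 4 3 */
	printf("\n");
	return 0;
}
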
@@ -2263,12 +2263,13 @@ id_hash_clear_older(sql_hash *h, ulng ol
static void
store_pending_changes(sqlstore *store, ulng oldest)
{
- ulng oldest_changes = store_get_timestamp(store);
+ ulng oldest_changes = store_get_timestamp(store);
if (!list_empty(store->changes)) { /* lets first cleanup old stuff */
for(node *n=store->changes->h; n; ) {
node *next = n->next;
sql_change *c = n->data;
+ assert(c->cleanup);
if (c->cleanup(store, c, oldest)) {
list_remove_node(store->changes, store, n);
_DELETE(c);
@@ -3944,10 +3945,15 @@ sql_trans_commit(sql_trans *tr)
ok = store->logger_api.log_tend(store); /* wal end */
}
store_lock(store);
- commit_ts = tr->parent ? tr->parent->tid : store_timestamp(store);
- if (tr->parent)
+
+ if (tr->parent) {
+ commit_ts = oldest = tr->parent->tid;
tr->parent->logchanges += tr->logchanges;
- oldest = tr->parent ? commit_ts : store_oldest(store);
+ }
+ else {
+ commit_ts = store_timestamp(store);
+ oldest = store_oldest(store);
+ }
tr->logchanges = 0;
TRC_DEBUG(SQL_STORE, "Forwarding changes (" ULLFMT ", " ULLFMT ") -> " ULLFMT "\n", tr->tid, tr->ts, commit_ts);
/* apply committed changes */
@@ -4161,15 +4167,11 @@ static int
sql_trans_drop_any_comment(sql_trans *tr, sqlid id)
{
sqlstore *store = tr->store;
- sql_schema *sys;
+ sql_table *comments;
sql_column *id_col;
- sql_table *comments;
oid row;
- sys = find_sql_schema(tr, "sys");
- assert(sys);
-
- comments = find_sql_table(tr, sys, "comments");
+ comments = find_sql_table(tr, find_sql_schema(tr, "sys"), "comments");
if (!comments) /* for example during upgrades */
return 0;
@@ -4880,8 +4882,9 @@ sql_trans_create_func(sql_func **fres, s
const char *mod, const char *impl,
const char *query, bit varres, bit vararg, bit system, bit side_effect)
{
sqlstore *store = tr->store;
- sql_table *sysfunc = find_sql_table(tr, find_sql_schema(tr, "sys"), "functions");
- sql_table *sysarg = find_sql_table(tr, find_sql_schema(tr, "sys"), "args");
+ sql_schema *syss = find_sql_schema(tr, "sys");
+ sql_table *sysfunc = find_sql_table(tr, syss, "functions");
+ sql_table *sysarg = find_sql_table(tr, syss, "args");
node *n;
int number = 0, ftype = (int) type, flang = (int) lang, res = LOG_OK;
bit semantics = TRUE;
diff --git a/sql/test/Tests/orderby-nulls-first-last.test b/sql/test/Tests/orderby-nulls-first-last.test
--- a/sql/test/Tests/orderby-nulls-first-last.test
+++ b/sql/test/Tests/orderby-nulls-first-last.test
@@ -1,4 +1,4 @@
--- tests voor ORDER BY x [ ASC | DESC ] NULLS { FIRST | LAST }
+-- tests voor ORDER BY x [ ASC | DESC ] NULLS { FIRST | LAST } [ LIMIT n ] [ OFFSET n ]
-- FYI: other scripts where NULLS [ FIRST | LAST ] are tested:
-- sql/test/analytics/Tests/analytics13.test
@@ -204,6 +204,32 @@ NULL
NULL
query I nosort
+select cti from obn order by cti nulls last limit 6;
+----
+2
+3
+4
+9
+12
+NULL
+
+query I nosort
+select cti from obn order by cti nulls last limit 4 offset 2;
+----
+4
+9
+12
+NULL
+
+query I nosort
+select cti from obn order by cti nulls last offset 3;
+----
+9
+12
+NULL
+NULL
+
+query I nosort
select cti from obn order by cti desc;
----
12
@@ -236,6 +262,25 @@ 4
3
2