Changeset: 4f909606267c for MonetDB
URL: https://dev.monetdb.org/hg/MonetDB/rev/4f909606267c
Modified Files:
gdk/gdk_calc_addsub.c
gdk/gdk_string.c
gdk/gdk_subquery.c
gdk/gdk_utils.c
geom/monetdb5/geom.c
monetdb5/extras/rapi/rapi.c
monetdb5/modules/atoms/json.c
monetdb5/modules/atoms/str.c
monetdb5/modules/mal/tablet.c
Branch: default
Log Message:
Merge with Dec2025 branch.
diffs (truncated from 994 to 300 lines):
diff --git a/gdk/gdk_calc_addsub.c b/gdk/gdk_calc_addsub.c
--- a/gdk/gdk_calc_addsub.c
+++ b/gdk/gdk_calc_addsub.c
@@ -1456,12 +1456,10 @@ addstr_loop(BAT *b1, const char *l, BAT
llen = strlen(l);
rlen = strlen(r);
if (llen + rlen >= slen) {
- /* ma_close(&ta_state); */
- /* ta_state = ma_open(ta); */
- slen = llen + rlen + 1024;
- s = ma_alloc(ta, slen);
+ s = ma_realloc(ta, s, llen + rlen + 1024, slen);
if (s == NULL)
goto bailout;
+ slen = llen + rlen + 1024;
}
(void) stpcpy(stpcpy(s, l), r);
if (tfastins_nocheckVAR(bn, i, s) != GDK_SUCCEED)
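
A note on the pattern above: the addstr_loop hunk replaces the drop-and-reallocate sequence with a single ma_realloc call and only records the new size once the allocation is known to have succeeded, so a failed grow leaves slen describing the buffer that is actually still held. Below is a minimal standalone sketch of the same grow-then-commit idea; it uses plain realloc in place of GDK's ma_realloc (which, as the diff shows, also takes the allocator and the old size), and concat_into is a hypothetical name, not a GDK function.

  #include <stdlib.h>
  #include <string.h>

  /* Hypothetical helper mirroring the addstr_loop pattern: grow the buffer
   * with realloc, and commit the new size to *sizep only after the call is
   * known to have succeeded, so a failure leaves (*bufp, *sizep) intact. */
  static char *
  concat_into(char **bufp, size_t *sizep, const char *l, const char *r)
  {
      size_t llen = strlen(l), rlen = strlen(r);

      if (llen + rlen >= *sizep) {
          /* same 1024-byte slack as the diff, to amortize regrowth */
          char *s = realloc(*bufp, llen + rlen + 1024);
          if (s == NULL)
              return NULL;              /* old buffer and size still valid */
          *bufp = s;
          *sizep = llen + rlen + 1024;  /* commit only after success */
      }
      memcpy(*bufp, l, llen);
      memcpy(*bufp + llen, r, rlen + 1);  /* copy r including its NUL */
      return *bufp;
  }
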
diff --git a/gdk/gdk_string.c b/gdk/gdk_string.c
--- a/gdk/gdk_string.c
+++ b/gdk/gdk_string.c
@@ -6887,11 +6887,11 @@ convertcase(allocator *ma, char **restri
* terminating NUL */
size_t newlen = bl + 1024;
dst = ma_realloc(ma, *buf, newlen, bl);
+ *buf = (char *) dst;
if (dst == NULL) {
- *buflen = bl;
+ *buflen = 0;
return GDK_FAIL;
}
- *buf = (char *) dst;
bl = newlen;
bl5 = bl - 5;
}
@@ -6935,11 +6935,11 @@ convertcase(allocator *ma, char **restri
if (dstoff + 1 > bl) {
size_t newlen = dstoff + 1;
dst = ma_realloc(ma, *buf, newlen, bl);
+ *buf = (char *) dst;
if (dst == NULL) {
- *buflen = bl;
+ *buflen = 0;
return GDK_FAIL;
}
- *buf = (char *) dst;
bl = newlen;
}
dst[dstoff] = '\0';
@@ -6978,8 +6978,6 @@ BATcaseconvert(BAT *b, BAT *s, int direc
oid bhseqbase = b->hseqbase;
QryCtx *qry_ctx = MT_thread_get_qry_ctx();
qry_ctx = qry_ctx ? qry_ctx : &(QryCtx) {.endtime = 0};
- allocator *ta = MT_thread_getallocator();
- allocator_state ta_state = ma_open(ta);
TRC_DEBUG_IF(ALGO) t0 = GDKusec();
BATcheck(b, NULL);
@@ -6990,6 +6988,8 @@ BATcaseconvert(BAT *b, BAT *s, int direc
bi = bat_iterator(b);
char *buf = NULL;
size_t buflen = 0;
+ allocator *ta = MT_thread_getallocator();
+ allocator_state ta_state = ma_open(ta);
TIMEOUT_LOOP_IDX_DECL(i, ci.ncand, qry_ctx) {
BUN x = canditer_next(&ci) - bhseqbase;
if (convertcase(ta, &buf, &buflen, BUNtvar(&bi, x),
@@ -9726,11 +9726,11 @@ GDKasciify(allocator *ma, char **restric
* bytes plus terminating NUL */
size_t newlen = bl + 1024;
dst = ma_realloc(ma, *buf, newlen, bl);
+ *buf = (char *) dst;
if (dst == NULL) {
- *buflen = bl;
+ *buflen = 0;
return GDK_FAIL;
}
- *buf = (char *) dst;
bl = newlen;
bl8 = bl - 8;
}
@@ -9756,11 +9756,11 @@ GDKasciify(allocator *ma, char **restric
if (dstoff + 1 > bl) {
size_t newlen = dstoff + 1;
dst = ma_realloc(ma, *buf, newlen, bl);
+ *buf = (char *) dst;
if (dst == NULL) {
- *buflen = bl;
+ *buflen = 0;
return GDK_FAIL;
}
- *buf = (char *) dst;
bl = newlen;
}
dst[dstoff] = '\0';
@@ -9784,8 +9784,6 @@ BATasciify(BAT *b, BAT *s)
oid bhseqbase = b->hseqbase;
QryCtx *qry_ctx = MT_thread_get_qry_ctx();
qry_ctx = qry_ctx ? qry_ctx : &(QryCtx) {.endtime = 0};
- allocator *ta = MT_thread_getallocator();
- allocator_state ta_state = ma_open(ta);
TRC_DEBUG_IF(ALGO) t0 = GDKusec();
BATcheck(b, NULL);
@@ -9796,6 +9794,8 @@ BATasciify(BAT *b, BAT *s)
bi = bat_iterator(b);
char *buf = NULL;
size_t buflen = 0;
+ allocator *ta = MT_thread_getallocator();
+ allocator_state ta_state = ma_open(ta);
TIMEOUT_LOOP_IDX_DECL(i, ci.ncand, qry_ctx) {
BUN x = canditer_next(&ci) - bhseqbase;
if (GDKasciify(ta, &buf, &buflen, BUNtvar(&bi, x)) != GDK_SUCCEED ||
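
The convertcase and GDKasciify hunks store the ma_realloc result into *buf before the NULL check and report *buflen = 0 on failure, so the caller's pointer/length pair always describes what it actually holds afterwards; the BATcaseconvert and BATasciify hunks likewise defer MT_thread_getallocator()/ma_open() until after the argument checks, keeping the early-return paths free of allocator state. The sketch below illustrates that output-parameter contract with plain realloc standing in for ma_realloc; ascii_upper is a hypothetical helper, and the failure handling deliberately mirrors the diff rather than being leak-safe for plain realloc.

  #include <ctype.h>
  #include <stdlib.h>
  #include <string.h>

  /* Hypothetical analogue of the convertcase/GDKasciify contract: keep the
   * caller's (*buf, *buflen) pair in sync with whatever the allocator
   * returned, and report length 0 on failure so a stale size can never be
   * paired with a dead buffer. */
  static int
  ascii_upper(char **buf, size_t *buflen, const char *src)
  {
      size_t need = strlen(src) + 1;

      if (need > *buflen) {
          char *dst = realloc(*buf, need);
          *buf = dst;         /* store back before checking, as in the diff */
          if (dst == NULL) {
              *buflen = 0;    /* caller now holds nothing usable */
              return -1;      /* NB: with plain realloc the old block is lost
                               * here; that is presumably acceptable in the
                               * GDK code because the buffer comes from the
                               * thread allocator opened with ma_open in the
                               * callers */
          }
          *buflen = need;
      }
      for (size_t i = 0; i < need; i++)
          (*buf)[i] = (char) toupper((unsigned char) src[i]);
      return 0;
  }
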
diff --git a/gdk/gdk_subquery.c b/gdk/gdk_subquery.c
--- a/gdk/gdk_subquery.c
+++ b/gdk/gdk_subquery.c
@@ -71,120 +71,119 @@ BATall_grp(BAT *l, BAT *g, BAT *e, BAT *
return NULL;
}
- allocator *ta = MT_thread_getallocator();
- allocator_state ta_state = ma_open(ta);
if (BATcount(l) == 0 || ngrp == 0) {
const void *nilp = ATOMnilptr(l->ttype);
- if ((res = BATconstant(ngrp == 0 ? 0 : min, l->ttype, nilp, ngrp, TRANSIENT)) == NULL)
- goto alloc_fail;
- } else {
- BATiter li;
+ return BATconstant(ngrp == 0 ? 0 : min, l->ttype, nilp, ngrp, TRANSIENT);
+ }
+
+ if ((res = COLnew(min, l->ttype, ngrp, TRANSIENT)) == NULL)
+ return NULL;
- if ((res = COLnew(min, l->ttype, ngrp, TRANSIENT)) == NULL)
- goto alloc_fail;
- if ((oids = ma_alloc(ta, ngrp * sizeof(oid))) == NULL)
- goto alloc_fail;
+ allocator *ta = MT_thread_getallocator();
+ allocator_state ta_state = ma_open(ta);
- for (i = 0; i < ngrp; i++)
- oids[i] = BUN_NONE;
+ if ((oids = ma_alloc(ta, ngrp * sizeof(oid))) == NULL)
+ goto alloc_fail;
+
+ for (i = 0; i < ngrp; i++)
+ oids[i] = BUN_NONE;
- if (!g || BATtdense(g))
- gids = NULL;
- else
- gids = (const oid *) Tloc(g, 0);
+ if (!g || BATtdense(g))
+ gids = NULL;
+ else
+ gids = (const oid *) Tloc(g, 0);
- li = bat_iterator(l);
- switch (ATOMbasetype(l->ttype)) {
- case TYPE_bte:
- SQLall_grp_imp(bte);
- break;
- case TYPE_sht:
- SQLall_grp_imp(sht);
- break;
- case TYPE_int:
- SQLall_grp_imp(int);
- break;
- case TYPE_lng:
- SQLall_grp_imp(lng);
- break;
+ BATiter li = bat_iterator(l);
+ switch (ATOMbasetype(l->ttype)) {
+ case TYPE_bte:
+ SQLall_grp_imp(bte);
+ break;
+ case TYPE_sht:
+ SQLall_grp_imp(sht);
+ break;
+ case TYPE_int:
+ SQLall_grp_imp(int);
+ break;
+ case TYPE_lng:
+ SQLall_grp_imp(lng);
+ break;
#ifdef HAVE_HGE
- case TYPE_hge:
- SQLall_grp_imp(hge);
- break;
+ case TYPE_hge:
+ SQLall_grp_imp(hge);
+ break;
#endif
- case TYPE_flt:
- SQLall_grp_imp(flt);
- break;
- case TYPE_dbl:
- SQLall_grp_imp(dbl);
- break;
- default: {
- bool (*atomeq) (const void *, const void *) = ATOMequal(l->ttype);
- const void *restrict nilp = ATOMnilptr(l->ttype);
+ case TYPE_flt:
+ SQLall_grp_imp(flt);
+ break;
+ case TYPE_dbl:
+ SQLall_grp_imp(dbl);
+ break;
+ default: {
+ bool (*atomeq) (const void *, const void *) = ATOMequal(l->ttype);
+ const void *restrict nilp = ATOMnilptr(l->ttype);
- for (BUN n = 0; n < ci.ncand; n++) {
- i = canditer_next(&ci) - l->hseqbase;
- if (gids == NULL ||
- (gids[i] >= min && gids[i] <= max)) {
- if (gids)
- gid = gids[i] - min;
- else
- gid = (oid) i;
- if (oids[gid] != (BUN_NONE - 1)) {
- if (oids[gid] == BUN_NONE) {
- if (!atomeq(BUNtail(&li, i), nilp))
- oids[gid] = i;
- } else {
- const void *pi = BUNtail(&li, oids[gid]);
- const void *pp = BUNtail(&li, i);
- if (!atomeq(pi, pp) && !atomeq(pp, nilp))
- oids[gid] = BUN_NONE - 1;
- }
+ for (BUN n = 0; n < ci.ncand; n++) {
+ i = canditer_next(&ci) - l->hseqbase;
+ if (gids == NULL ||
+ (gids[i] >= min && gids[i] <= max)) {
+ if (gids)
+ gid = gids[i] - min;
+ else
+ gid = (oid) i;
+ if (oids[gid] != (BUN_NONE - 1)) {
+ if (oids[gid] == BUN_NONE) {
+ if (!atomeq(BUNtail(&li, i), nilp))
+ oids[gid] = i;
+ } else {
+ const void *pi = BUNtail(&li, oids[gid]);
+ const void *pp = BUNtail(&li, i);
+ if (!atomeq(pi, pp) && !atomeq(pp, nilp))
+ oids[gid] = BUN_NONE - 1;
}
}
}
+ }
- if (ATOMvarsized(l->ttype)) {
- for (i = 0; i < ngrp; i++) { /* convert the found oids in values */
- BUN noid = oids[i];
- const void *next;
- if (noid > (BUN_NONE - 2)) {
- next = nilp;
- hasnil = 1;
- } else {
- next = BUNtvar(&li, noid);
- }
- if (tfastins_nocheckVAR(res, i, next) != GDK_SUCCEED) {
- bat_iterator_end(&li);
- goto alloc_fail;
- }
+ if (ATOMvarsized(l->ttype)) {
+ for (i = 0; i < ngrp; i++) { /* convert the found oids in values */
+ BUN noid = oids[i];
+ const void *next;
+ if (noid > (BUN_NONE - 2)) {
+ next = nilp;
+ hasnil = 1;
+ } else {
+ next = BUNtvar(&li, noid);
}
- } else {
- uint8_t *restrict rcast = (uint8_t *) Tloc(res, 0);
- uint16_t width = res->twidth;
- for (i = 0; i < ngrp; i++) { /* convert the found oids in values */
- BUN noid = oids[i];
- const void *next;
- if (noid > (BUN_NONE - 2)) {
- next = nilp;
- hasnil = 1;
- } else {
- next = BUNtloc(&li, noid);
- }
- memcpy(rcast, next, width);
- rcast += width;
+ if (tfastins_nocheckVAR(res, i, next) != GDK_SUCCEED) {
+ bat_iterator_end(&li);
+ goto alloc_fail;
}
}
- }
+ } else {
+ uint8_t *restrict rcast = (uint8_t *) Tloc(res, 0);
+ uint16_t width = res->twidth;
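
The BATall_grp hunk, cut short by the truncation above, follows the same theme: the empty-input case now returns a constant BAT directly, the thread allocator is fetched and opened only after COLnew has succeeded, and the body loses one nesting level because the old else branch became the fall-through path. The control-flow sketch below mirrors that shape; first_per_group is a hypothetical helper, with plain calloc/malloc standing in for COLnew and ma_alloc.

  #include <stdlib.h>

  /* Hypothetical helper mirroring the BATall_grp restructuring: trivial
   * inputs return before any scratch state exists, the result is created
   * first, and the scratch array is acquired only after that, so each
   * failure path needs exactly one cleanup. */
  static int *
  first_per_group(const int *vals, const size_t *grp, size_t n, size_t ngrp)
  {
      if (n == 0 || ngrp == 0)      /* analogue of the early BATconstant return */
          return calloc(ngrp ? ngrp : 1, sizeof(int));

      int *res = calloc(ngrp, sizeof(int));   /* analogue of COLnew */
      if (res == NULL)
          return NULL;

      /* analogue of the ma_alloc'ed oids array: acquired only now */
      unsigned char *seen = calloc(ngrp, 1);
      if (seen == NULL) {
          free(res);
          return NULL;
      }

      for (size_t i = 0; i < n; i++) {
          size_t g = grp[i];
          if (g < ngrp && !seen[g]) {   /* remember the first value per group */
              res[g] = vals[i];
              seen[g] = 1;
          }
      }
      free(seen);
      return res;
  }
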