Changeset: c11df9b10af3 for MonetDB
URL: https://dev.monetdb.org/hg/MonetDB/rev/c11df9b10af3
Modified Files:
        sql/backends/monet5/sql.c
        sql/test/BugTracker-2026/Tests/All
Branch: default
Log Message:

Merge with Dec2025 branch.


diffs (truncated from 452 to 300 lines):

diff --git a/sql/backends/monet5/sql.c b/sql/backends/monet5/sql.c
--- a/sql/backends/monet5/sql.c
+++ b/sql/backends/monet5/sql.c
@@ -1322,6 +1322,7 @@ mvc_bind_wrap(Client cntxt, MalBlkPtr mb
                                *bid = id->batCacheid;
                                *uvl = vl->batCacheid;
                        } else {
+                               BBPunfix(bn->batCacheid);
                                *bid = e_bat(TYPE_oid);
                                *uvl = e_bat(c->type.type->localtype);
                                if (*bid == BID_NIL || *uvl == BID_NIL) {
@@ -1354,8 +1355,7 @@ mvc_bind_wrap(Client cntxt, MalBlkPtr mb
                        BBPkeepref(bn);
                        *bid = bn->batCacheid;
                }
-       }
-       else if (upd) { /*unpartitioned access to update bats*/
+       } else if (upd) { /*unpartitioned access to update bats*/
                BAT *ui = NULL, *uv = NULL;
                if (store->storage_api.bind_updates(m->session->tr, c, &ui, 
&uv) == LOG_ERR)
                        throw(SQL,"sql.bind",SQLSTATE(HY005) "Cannot access the 
update columns");
@@ -1365,8 +1365,7 @@ mvc_bind_wrap(Client cntxt, MalBlkPtr mb
                BBPkeepref(uv);
                *bid = ui->batCacheid;
                *uvl = uv->batCacheid;
-       }
-       else { /*unpartitioned access to base column*/
+       } else { /*unpartitioned access to base column*/
                int coltype = getBatType(getArgType(mb, pci, 0));
                b = store->storage_api.bind_col(m->session->tr, c, access);
                if (b == NULL)
@@ -1675,6 +1674,7 @@ mvc_bind_idxbat_wrap(Client cntxt, MalBl
                                *bid = id->batCacheid;
                                *uvl = vl->batCacheid;
                        } else {
+                               BBPunfix(bn->batCacheid);
                                *bid = e_bat(TYPE_oid);
                                *uvl = 
e_bat((i->type==join_idx)?TYPE_oid:TYPE_lng);
                                if (*bid == BID_NIL || *uvl == BID_NIL) {
diff --git a/sql/test/2026/Tests/All b/sql/test/2026/Tests/All
new file mode 100644
--- /dev/null
+++ b/sql/test/2026/Tests/All
@@ -0,0 +1,1 @@
+prepared-stmt-bat-leak
diff --git a/sql/test/2026/Tests/prepared-stmt-bat-leak.SQL.py 
b/sql/test/2026/Tests/prepared-stmt-bat-leak.SQL.py
new file mode 100644
--- /dev/null
+++ b/sql/test/2026/Tests/prepared-stmt-bat-leak.SQL.py
@@ -0,0 +1,48 @@
+from os import environ
+from MonetDBtesting import tpymonetdb as pymonetdb
+
+conn = pymonetdb.connect(database=environ['TSTDB'], port=environ['MAPIPORT'], 
autocommit=True)
+
+def count_dirty_voids(conn):
+    sql = "SELECT COUNT(*) FROM sys.bbp() WHERE dirty = 'dirty' AND ttype = 
'void'"
+    with conn.cursor() as c:
+        c.execute(sql)
+        return c.fetchone()[0]
+
+# prepare
+nrows = 500_000   # problem used to occur starting at 100_001
+with conn.cursor() as c:
+    c.execute("DROP TABLE IF EXISTS foo")
+    c.execute("CREATE TABLE foo(i INT, j INT, k INT)")
+    c.execute("INSERT INTO foo SELECT value AS i, -value AS j, 2 * value AS k 
FROM sys.generate_series(0, %s)", [nrows])
+
+
+def do_it(conn):
+    with conn.cursor() as c:
+        c.execute("PREPARE SELECT i AS col1, j as COL2 FROM foo LIMIT 1")
+        prep_id = c.lastrowid
+        c.execute("EXECUTE %s()", [prep_id])
+        c.execute("DEALLOCATE %s", [prep_id])
+
+
+# run it a few times
+dirty_void_counts = []
+dirty_void_counts.append(count_dirty_voids(conn))
+for i in range(4):
+    do_it(conn)
+    dirty_void_counts.append(count_dirty_voids(conn))
+
+# In principle, there should not be any new dirty void bats.
+# But maybe one or two appear because of other things that are
+# happening in the server.  Therefore we accept this test as
+# successful if at least one pair of measurements does not show
+# an increase.
+
+print('# These are the dirty void bat counts we measured: ', dirty_void_counts)
+for i in range(len(dirty_void_counts) - 1):
+    if dirty_void_counts[i] >= dirty_void_counts[i + 1]:
+        print("# It doesn't always increase so it's probably fine")
+        exit(0)
+# if we get here, they were all increasing
+print('# They keep increasing so there is a leak')
+exit(1)
diff --git 
a/sql/test/BugTracker-2026/Tests/7873-push_up_join-assertion-failure.test 
b/sql/test/BugTracker-2026/Tests/7873-push_up_join-assertion-failure.test
new file mode 100644
--- /dev/null
+++ b/sql/test/BugTracker-2026/Tests/7873-push_up_join-assertion-failure.test
@@ -0,0 +1,176 @@
+statement ok
+CREATE SCHEMA test7873
+
+statement ok
+SET SCHEMA test7873
+
+statement ok
+CREATE TABLE users (
+    id           INT,
+    username     VARCHAR(100),
+    email        VARCHAR(255),
+    age          INT,
+    status       VARCHAR(20),
+    created_at   TIMESTAMP,
+    score        DOUBLE
+)
+
+statement ok
+CREATE TABLE posts (
+    id          INT,
+    user_id     INT,
+    title       VARCHAR(255),
+    content     VARCHAR(1000),
+    views       INT,
+    likes       INT,
+    created_at  TIMESTAMP,
+    rating      DOUBLE
+)
+
+statement ok
+CREATE TABLE comments (
+    id          INT,
+    post_id     INT,
+    user_id     INT,
+    content     VARCHAR(1000),
+    is_spam     INT,
+    created_at  TIMESTAMP
+)
+
+statement ok
+CREATE TABLE orders (
+    id          INT,
+    user_id     INT,
+    amount      DOUBLE,
+    status      VARCHAR(20),
+    created_at  TIMESTAMP
+)
+
+statement ok
+INSERT INTO users VALUES
+(1, 'alice', '[email protected]', 20, 'active',  '2022-01-01 10:00:00', 88.5),
+(2, 'bob',   '[email protected]',   30, 'active',  '2022-01-02 11:00:00', 92.3),
+(3, 'carol', NULL,             NULL, 'banned','2022-01-03 12:00:00', NULL),
+(4, 'dave',  '[email protected]',  45, 'active',  '2022-01-04 13:00:00', 65.2),
+(5, NULL,    '[email protected]',  18, 'inactive','2022-01-05 14:00:00', 70.0)
+
+statement ok
+INSERT INTO posts VALUES
+(1, 1, 'Hello World', 'First post', 100, 10, '2022-01-10 10:00:00', 4.5),
+(2, 1, 'Another Post', NULL,        150, 20, '2022-01-11 11:00:00', 3.0),
+(3, 2, 'Bob Post',     'Content',   NULL,  5, '2022-01-12 12:00:00', NULL),
+(4, 3, NULL,           'Empty',     50,   2, '2022-01-13 13:00:00', 5.0),
+(5, 4, 'Last Post',    'Last',      300,  30,'2022-01-14 14:00:00', 4.9)
+
+statement ok
+INSERT INTO comments VALUES
+(1, 1, 2, 'Nice post', 0, '2022-01-20 10:00:00'),
+(2, 1, 3, 'Spam here', 1,  '2022-01-21 11:00:00'),
+(3, 2, 1, 'Thanks',    0, '2022-01-22 12:00:00'),
+(4, 4, 5, NULL,        0, '2022-01-23 13:00:00')
+
+statement ok
+INSERT INTO orders VALUES
+(1, 1, 100.00, 'paid',    '2022-02-01 09:00:00'),
+(2, 1, 200.50, 'shipped', '2022-02-02 10:00:00'),
+(3, 2, NULL,   'failed',  '2022-02-03 11:00:00'),
+(4, 3, 50.00,  'paid',    '2022-02-04 12:00:00'),
+(5, 5, 999.99, 'paid',    '2022-02-05 13:00:00')
+
+query I rowsort
+select
+  ascii(
+    'fG') as c0
+from
+  orders as ref_0
+----
+102
+102
+102
+102
+102
+
+-- query triggers assertion failure in push_up_join (sql=0x7f3d081844e0, 
rel=0x7f3d082494d0, ad=0x7f3d0824fde0) at sql/server/rel_unnest.c:1489
+query I rowsort
+select
+  ascii(
+    'fG') as c0
+from
+  orders as ref_0
+where EXISTS (
+  select
+      ref_0.amount as c0,
+      ref_0.status as c1,
+      subq_1.c3 as c2,
+      subq_1.c2 as c3,
+      ref_0.created_at as c4,
+      nullif(ref_0.created_at,
+        subq_1.c3) as c5,
+      subq_0.c1 as c6
+    from
+      (select
+            ref_0.user_id as c0,
+            (select status from orders limit 1 offset 3)
+               as c1,
+            ref_0.status as c2
+          from
+            users as ref_1
+          where ((select stddev_samp(id) from orders)
+                 < 8.51)
+            and (((37 != ref_1.age)
+                and ((EXISTS (
+                    select
+                        ref_2.email as c0,
+                        ref_2.score as c1
+                      from
+                        users as ref_2
+                      where ((EXISTS (
+                            select
+                                ref_0.id as c0
+                              from
+                                users as ref_3
+                              where 9.59 >= (select stddev_samp(id) from posts)
+                              limit 36))
+                          or (true AND true))
+                        and (ref_1.id >= ref_0.id)
+                      limit 195))
+                  or (ref_0.user_id > ref_1.id)))
+              and (true))) as subq_0,
+      lateral (select
+            ref_0.id as c0,
+            subq_0.c1 as c1,
+            subq_0.c2 as c2,
+            ref_4.created_at as c3,
+            ref_4.post_id as c4,
+            ref_4.created_at as c5
+          from
+            comments as ref_4
+          where '5H' != 'm'
+          limit 85) as subq_1
+    where (subq_0.c2 is not NULL)
+      or ((case when '4jEM' >= 'Ml4' then 44.16 else (select avg(id) from 
posts)
+               end
+             = 12.32)
+        and (case when ref_0.id < subq_1.c0 then false else false end
+             OR false))
+    limit 107)
+----
+
+statement ok
+DROP TABLE orders
+
+statement ok
+DROP TABLE comments
+
+statement ok
+DROP TABLE posts
+
+statement ok
+DROP TABLE users
+
+statement ok
+SET SCHEMA sys
+
+statement ok
+DROP SCHEMA test7873
+
diff --git 
a/sql/test/BugTracker-2026/Tests/7874-rel2bin_join-assertion-failure.test 
b/sql/test/BugTracker-2026/Tests/7874-rel2bin_join-assertion-failure.test
new file mode 100644
--- /dev/null
+++ b/sql/test/BugTracker-2026/Tests/7874-rel2bin_join-assertion-failure.test
@@ -0,0 +1,159 @@
+statement ok
+CREATE SCHEMA test7874
+
+statement ok
+SET SCHEMA test7874
+
+statement ok
+CREATE TABLE users (
+    id           INT,
+    username     VARCHAR(100),
+    email        VARCHAR(255),
+    age          INT,
+    status       VARCHAR(20),
+    created_at   TIMESTAMP,
+    score        DOUBLE
+)
_______________________________________________
checkin-list mailing list -- [email protected]
To unsubscribe send an email to [email protected]

Reply via email to