This is an automated email from the ASF dual-hosted git repository.
maxyang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/main by this push:
new 54087f70bf Fix unstable scan rows caused by limit squelch.
54087f70bf is described below
commit 54087f70bf8c9b62e6ccef81ee1b7799cf8c233c
Author: Dongxiao Song <[email protected]>
AuthorDate: Tue Jun 3 18:46:38 2025 +0800
Fix unstable scan rows caused by limit squelch.
Squelch in the limit node may cause the scan to finish early, so the
row count reported by the scan may vary.
Signed-off-by: Dongxiao Song <[email protected]>
---
src/test/regress/expected/gp_explain.out | 6 +++++-
src/test/regress/expected/gp_explain_optimizer.out | 6 +++++-
src/test/regress/sql/gp_explain.sql | 5 ++++-
3 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/src/test/regress/expected/gp_explain.out
b/src/test/regress/expected/gp_explain.out
index 7a838b69be..d71833f820 100644
--- a/src/test/regress/expected/gp_explain.out
+++ b/src/test/regress/expected/gp_explain.out
@@ -577,6 +577,10 @@ DROP USER regress_range_parted_user;
-- Test if explain analyze will hang with materialize node
CREATE TABLE recursive_table_ic (a INT) DISTRIBUTED BY (a);
INSERT INTO recursive_table_ic SELECT * FROM generate_series(20, 30000);
+-- start_matchsubs
+-- m/Seq Scan on recursive_table_ic \(actual rows=\d+ loops=1\)/
+-- s/Seq Scan on recursive_table_ic \(actual rows=\d+ loops=1\)/Seq Scan on
recursive_table_ic (actual rows=#### loops=1)/
+-- end_matchsubs
explain (analyze, costs off, timing off, summary off) WITH RECURSIVE
r(i) AS (
SELECT 1
@@ -600,7 +604,7 @@ SELECT * FROM y LIMIT 10;
-> WorkTable Scan on y (never executed)
-> Materialize (never executed)
-> Gather Motion 3:1 (slice1; segments: 3) (never
executed)
- -> Seq Scan on recursive_table_ic (actual
rows=4061 loops=1)
+ -> Seq Scan on recursive_table_ic (actual
rows=#### loops=1)
Optimizer: Postgres query optimizer
(13 rows)
diff --git a/src/test/regress/expected/gp_explain_optimizer.out
b/src/test/regress/expected/gp_explain_optimizer.out
index 0b476d484c..0ad9ffea63 100644
--- a/src/test/regress/expected/gp_explain_optimizer.out
+++ b/src/test/regress/expected/gp_explain_optimizer.out
@@ -599,6 +599,10 @@ DROP USER regress_range_parted_user;
-- Test if explain analyze will hang with materialize node
CREATE TABLE recursive_table_ic (a INT) DISTRIBUTED BY (a);
INSERT INTO recursive_table_ic SELECT * FROM generate_series(20, 30000);
+-- start_matchsubs
+-- m/Seq Scan on recursive_table_ic \(actual rows=\d+ loops=1\)/
+-- s/Seq Scan on recursive_table_ic \(actual rows=\d+ loops=1\)/Seq Scan on
recursive_table_ic (actual rows=#### loops=1)/
+-- end_matchsubs
explain (analyze, costs off, timing off, summary off) WITH RECURSIVE
r(i) AS (
SELECT 1
@@ -622,7 +626,7 @@ SELECT * FROM y LIMIT 10;
-> WorkTable Scan on y (never executed)
-> Materialize (never executed)
-> Gather Motion 3:1 (slice1; segments: 3) (never
executed)
- -> Seq Scan on recursive_table_ic (actual
rows=4061 loops=1)
+ -> Seq Scan on recursive_table_ic (actual
rows=#### loops=1)
Optimizer: Postgres query optimizer
(13 rows)
diff --git a/src/test/regress/sql/gp_explain.sql
b/src/test/regress/sql/gp_explain.sql
index ca9bbdb7d6..3cc60ed3d2 100644
--- a/src/test/regress/sql/gp_explain.sql
+++ b/src/test/regress/sql/gp_explain.sql
@@ -285,7 +285,10 @@ DROP USER regress_range_parted_user;
-- Test if explain analyze will hang with materialize node
CREATE TABLE recursive_table_ic (a INT) DISTRIBUTED BY (a);
INSERT INTO recursive_table_ic SELECT * FROM generate_series(20, 30000);
-
+-- start_matchsubs
+-- m/Seq Scan on recursive_table_ic \(actual rows=\d+ loops=1\)/
+-- s/Seq Scan on recursive_table_ic \(actual rows=\d+ loops=1\)/Seq Scan on
recursive_table_ic (actual rows=#### loops=1)/
+-- end_matchsubs
explain (analyze, costs off, timing off, summary off) WITH RECURSIVE
r(i) AS (
SELECT 1
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]