This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 643caa7f5c9 [fix](count) fix wrong count push down logic (#56182)
643caa7f5c9 is described below
commit 643caa7f5c949374a01a209dee325d7562808e6e
Author: Mingyu Chen (Rayner) <[email protected]>
AuthorDate: Sat Sep 20 19:06:16 2025 -0700
[fix](count) fix wrong count push down logic (#56182)
### What problem does this PR solve?
This bug was introduced by the topn optimization.
When executing a query like `select count(*) from tbl`, Doris triggers the
"count push down optimization", which sends several "dummy" splits to the BE,
each carrying a portion of the total row count.
But due to this bug, the BE used the range offset info in these dummy splits
to run the row group filter logic, which is incorrect and returned an empty
result because all row groups were filtered out.
This PR fixes it: the row group filter is skipped if the split is a dummy
split (a condensed sketch of the added guard follows).
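For context, here is a minimal sketch of the guard this patch adds, condensed
from the file_scanner.cpp diff below. The condition is taken from the diff;
the helper name, the TFileRangeDesc parameter type, and the include path are
assumptions for illustration only.

```cpp
#include <gen_cpp/PlanNodes_types.h> // assumed path for the thrift-generated types

// Hypothetical helper: true if this split is a "dummy" split produced by the
// table level count push down. Such a split carries a non-negative
// table-level row count, and its range offsets are undefined, so it must not
// be used for row group filtering.
static bool is_dummy_count_split(const TFileRangeDesc& range, TPushAggOp::type agg_type) {
    return agg_type == TPushAggOp::type::COUNT &&
           range.__isset.table_format_params &&
           range.table_format_params.table_level_row_count >= 0;
}
```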
How to reproduce:
1. Find an Iceberg table with a file size of at least 16MB.
2. Set file_split_size=4MB.
3. Run `select count(*) from table`; it will return an empty result.
---
be/src/vec/exec/scan/file_scanner.cpp | 12 +++++++++++-
.../doris/datasource/hive/source/HiveScanNode.java | 3 +++
.../datasource/iceberg/source/IcebergScanNode.java | 4 ++++
.../doris/datasource/paimon/source/PaimonScanNode.java | 4 ++++
gensrc/thrift/PlanNodes.thrift | 2 +-
.../iceberg/test_iceberg_optimize_count.out | Bin 245 -> 313 bytes
.../iceberg/test_iceberg_optimize_count.groovy | 12 ++++++++----
7 files changed, 31 insertions(+), 6 deletions(-)
diff --git a/be/src/vec/exec/scan/file_scanner.cpp b/be/src/vec/exec/scan/file_scanner.cpp
index c18269179dd..eb30354fe3b 100644
--- a/be/src/vec/exec/scan/file_scanner.cpp
+++ b/be/src/vec/exec/scan/file_scanner.cpp
@@ -1154,7 +1154,17 @@ Status FileScanner::_get_next_reader() {
}
_cur_reader->set_push_down_agg_type(_get_push_down_agg_type());
- RETURN_IF_ERROR(_set_fill_or_truncate_columns(need_to_get_parsed_schema));
+ if (_get_push_down_agg_type() == TPushAggOp::type::COUNT &&
+ range.__isset.table_format_params &&
+ range.table_format_params.table_level_row_count >= 0) {
+ // This is a table level count push down operation, no need to call
+ // _set_fill_or_truncate_columns.
+ // in _set_fill_or_truncate_columns, we will use [range.start_offset, end offset]
+ // to filter the row group. But if this is count push down, the offset is undefined,
+ // causing incorrect row group filter and may return empty result.
+ } else {
+ RETURN_IF_ERROR(_set_fill_or_truncate_columns(need_to_get_parsed_schema));
+ }
_cur_reader_eof = false;
break;
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java
index 00becbbc821..a11dc6d247e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java
@@ -467,10 +467,12 @@ public class HiveScanNode extends FileQueryScanNode {
}
transactionalHiveDesc.setDeleteDeltas(deleteDeltaDescs);
tableFormatFileDesc.setTransactionalHiveParams(transactionalHiveDesc);
+ tableFormatFileDesc.setTableLevelRowCount(-1);
rangeDesc.setTableFormatParams(tableFormatFileDesc);
} else {
TTableFormatFileDesc tableFormatFileDesc = new TTableFormatFileDesc();
tableFormatFileDesc.setTableFormatType(TableFormatType.HIVE.value());
+ tableFormatFileDesc.setTableLevelRowCount(-1);
rangeDesc.setTableFormatParams(tableFormatFileDesc);
}
}
@@ -593,3 +595,4 @@ public class HiveScanNode extends FileQueryScanNode {
}
}
+
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergScanNode.java
index 5fc5d0349f3..912526e320b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergScanNode.java
@@ -181,6 +181,9 @@ public class IcebergScanNode extends FileQueryScanNode {
tableFormatFileDesc.setTableFormatType(icebergSplit.getTableFormatType().value());
if (tableLevelPushDownCount) {
tableFormatFileDesc.setTableLevelRowCount(icebergSplit.getTableLevelRowCount());
+ } else {
+ // MUST explicitly set to -1, to be distinct from valid row count >= 0
+ tableFormatFileDesc.setTableLevelRowCount(-1);
}
TIcebergFileDesc fileDesc = new TIcebergFileDesc();
fileDesc.setFormatVersion(formatVersion);
@@ -621,3 +624,4 @@ public class IcebergScanNode extends FileQueryScanNode {
return Optional.empty();
}
}
+
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java
index 0e063e02fa6..7d49643e50d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java
@@ -243,6 +243,9 @@ public class PaimonScanNode extends FileQueryScanNode {
}
if (paimonSplit.getRowCount().isPresent()) {
tableFormatFileDesc.setTableLevelRowCount(paimonSplit.getRowCount().get());
+ } else {
+ // MUST explicitly set to -1, to be distinct from valid row count >= 0
+ tableFormatFileDesc.setTableLevelRowCount(-1);
}
tableFormatFileDesc.setPaimonParams(fileDesc);
Map<String, String> partitionValues = paimonSplit.getPaimonPartitionValues();
@@ -714,3 +717,4 @@ public class PaimonScanNode extends FileQueryScanNode {
return baseTable;
}
}
+
diff --git a/gensrc/thrift/PlanNodes.thrift b/gensrc/thrift/PlanNodes.thrift
index 4ce7b4770d6..e99f93b3ff1 100644
--- a/gensrc/thrift/PlanNodes.thrift
+++ b/gensrc/thrift/PlanNodes.thrift
@@ -398,7 +398,7 @@ struct TTableFormatFileDesc {
6: optional TMaxComputeFileDesc max_compute_params
7: optional TTrinoConnectorFileDesc trino_connector_params
8: optional TLakeSoulFileDesc lakesoul_params
- 9: optional i64 table_level_row_count
+ 9: optional i64 table_level_row_count = -1
}
// Deprecated, hive text talbe is a special format, not a serde type
diff --git a/regression-test/data/external_table_p0/iceberg/test_iceberg_optimize_count.out b/regression-test/data/external_table_p0/iceberg/test_iceberg_optimize_count.out
index ec9129a00d2..20d03ad9c06 100644
Binary files a/regression-test/data/external_table_p0/iceberg/test_iceberg_optimize_count.out and b/regression-test/data/external_table_p0/iceberg/test_iceberg_optimize_count.out differ
diff --git a/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy b/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy
index bc3b006fb93..0f1f5535c05 100644
--- a/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy
+++ b/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy
@@ -50,10 +50,14 @@ suite("test_iceberg_optimize_count", "p0,external,doris,external_docker,external
// use push down count
sql """ set enable_count_push_down_for_external_table=true; """
- qt_q01 """${sqlstr1}"""
- qt_q02 """${sqlstr2}"""
- qt_q03 """${sqlstr3}"""
- qt_q04 """${sqlstr4}"""
+ for (String val: ["1K", "0"]) {
+ sql "set file_split_size=${val}"
+ qt_q01 """${sqlstr1}"""
+ qt_q02 """${sqlstr2}"""
+ qt_q03 """${sqlstr3}"""
+ qt_q04 """${sqlstr4}"""
+ }
+ sql "unset variable file_split_size;"
// traditional mode
sql """set num_files_in_batch_mode=100000"""
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]