This is an automated email from the ASF dual-hosted git repository.
lihaopeng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new f40811ce39e Revert "[feat](storage) add read_null_map support for
COUNT_NULL push down aggregate (#60996)" (#61439)
f40811ce39e is described below
commit f40811ce39e577c3c586e0719ae0a22a553650d0
Author: HappenLee <[email protected]>
AuthorDate: Tue Mar 24 09:51:06 2026 +0800
Revert "[feat](storage) add read_null_map support for COUNT_NULL push down
aggregate (#60996)" (#61439)
This reverts commit 03a9b939e88a88df5cd37bff672430e8833c9be5. We will find a
new way to fix the performance problem.
---
be/src/storage/iterator/vgeneric_iterators.cpp | 51 ----------
be/src/storage/iterator/vgeneric_iterators.h | 1 -
be/src/storage/segment/column_reader.cpp | 110 ---------------------
be/src/storage/segment/column_reader.h | 17 +---
.../glue/translator/PhysicalPlanTranslator.java | 3 -
.../rules/implementation/AggregateStrategies.java | 76 ++++----------
.../physical/PhysicalStorageLayerAggregate.java | 2 +-
gensrc/thrift/PlanNodes.thrift | 3 +-
.../query_p0/aggregate/count_null_push_down.out | 10 --
.../explain/test_pushdown_explain.groovy | 2 +-
.../query_p0/aggregate/count_null_push_down.groovy | 44 ---------
11 files changed, 26 insertions(+), 293 deletions(-)
diff --git a/be/src/storage/iterator/vgeneric_iterators.cpp
b/be/src/storage/iterator/vgeneric_iterators.cpp
index f80b57d1afb..267df0bb715 100644
--- a/be/src/storage/iterator/vgeneric_iterators.cpp
+++ b/be/src/storage/iterator/vgeneric_iterators.cpp
@@ -25,7 +25,6 @@
#include "core/block/block.h"
#include "core/block/column_with_type_and_name.h"
#include "core/column/column.h"
-#include "core/column/column_nullable.h"
#include "core/data_type/data_type.h"
#include "storage/cache/schema_cache.h"
#include "storage/field.h"
@@ -46,19 +45,6 @@ using namespace ErrorCode;
Status VStatisticsIterator::init(const StorageReadOptions& opts) {
if (!_init) {
_push_down_agg_type_opt = opts.push_down_agg_type_opt;
- _tablet_schema = opts.tablet_schema;
-
- // COUNT_NULL needs to actually read nullmap pages, so the column
iterators must be
- // fully initialized with a valid ColumnIteratorOptions (file_reader,
stats, io_ctx).
- // Other agg types (COUNT, MINMAX, MIX) only use zone-map metadata and
never open
- // pages, so they do not need init.
- const bool need_iter_init = (_push_down_agg_type_opt ==
TPushAggOp::COUNT_NULL);
- ColumnIteratorOptions iter_opts {
- .use_page_cache = opts.use_page_cache,
- .file_reader = _segment->file_reader().get(),
- .stats = opts.stats,
- .io_ctx = opts.io_ctx,
- };
for (size_t i = 0; i < _schema.num_column_ids(); i++) {
auto cid = _schema.column_id(i);
@@ -66,16 +52,6 @@ Status VStatisticsIterator::init(const StorageReadOptions&
opts) {
if (_column_iterators_map.count(unique_id) < 1) {
RETURN_IF_ERROR(_segment->new_column_iterator(
opts.tablet_schema->column(cid),
&_column_iterators_map[unique_id], &opts));
- if (need_iter_init) {
-
RETURN_IF_ERROR(_column_iterators_map[unique_id]->init(iter_opts));
- // Seek to ordinal 0 once during init so that the page
iterator
- // is properly positioned for sequential read_null_map()
calls
- // in next_batch(). We must NOT seek again in next_batch()
—
- // doing so would reset the iterator to ordinal 0 on every
batch
- // and cause rows to be re-read/double-counted for segments
- // larger than MAX_ROW_SIZE_IN_COUNT (65535) rows.
-
RETURN_IF_ERROR(_column_iterators_map[unique_id]->seek_to_ordinal(0));
- }
}
_column_iterators.push_back(_column_iterators_map[unique_id].get());
}
@@ -100,33 +76,6 @@ Status VStatisticsIterator::next_batch(Block* block) {
for (auto& column : columns) {
column->insert_many_defaults(size);
}
- } else if (_push_down_agg_type_opt == TPushAggOp::COUNT_NULL) {
- for (int i = 0; i < (int)columns.size(); ++i) {
- auto& column = columns[i];
- auto cid = _schema.column_id(i);
- auto& tablet_column = _tablet_schema->column(cid);
-
- if (tablet_column.is_nullable()) {
- auto& nullable_col = assert_cast<ColumnNullable&>(*column);
- auto& nested_col = nullable_col.get_nested_column();
-
- // Read the real nullmap for this column from the current
position.
- // Do NOT seek back to ordinal 0 here: the column iterator
already
- // starts at ordinal 0 after init(), and each call to
read_null_map
- // advances it sequentially. Seeking to 0 on every
next_batch() call
- // would cause large segments (> MAX_ROW_SIZE_IN_COUNT
rows) to have
- // their first portion re-read and counted multiple times,
producing
- // a result higher than the true non-null count.
- size_t read_rows = size;
- auto& null_map_data = nullable_col.get_null_map_data();
-
RETURN_IF_ERROR(_column_iterators[i]->read_null_map(&read_rows, null_map_data));
-
- // nested column needs one default value per row
- nested_col.insert_many_defaults(size);
- } else {
- column->insert_many_defaults(size);
- }
- }
} else {
for (int i = 0; i < columns.size(); ++i) {
RETURN_IF_ERROR(_column_iterators[i]->next_batch_of_zone_map(&size,
columns[i]));
diff --git a/be/src/storage/iterator/vgeneric_iterators.h
b/be/src/storage/iterator/vgeneric_iterators.h
index 326cb750f1a..449d5cb1606 100644
--- a/be/src/storage/iterator/vgeneric_iterators.h
+++ b/be/src/storage/iterator/vgeneric_iterators.h
@@ -63,7 +63,6 @@ public:
private:
std::shared_ptr<Segment> _segment;
const Schema& _schema;
- std::shared_ptr<TabletSchema> _tablet_schema;
size_t _target_rows = 0;
size_t _output_rows = 0;
bool _init = false;
diff --git a/be/src/storage/segment/column_reader.cpp
b/be/src/storage/segment/column_reader.cpp
index 9bbffcfaff8..13e277d5660 100644
--- a/be/src/storage/segment/column_reader.cpp
+++ b/be/src/storage/segment/column_reader.cpp
@@ -1252,27 +1252,6 @@ Status MapFileColumnIterator::set_access_paths(const
TColumnAccessPaths& all_acc
return Status::OK();
}
-Status MapFileColumnIterator::read_null_map(size_t* n, NullMap& null_map) {
- if (!_map_reader->is_nullable()) {
- return Status::InternalError("read_null_map is not supported for
non-nullable column");
- }
- if (!_null_iterator) {
- // Schema-change scenario: column became nullable but old segment has
no null data.
- null_map.resize(*n);
- memset(null_map.data(), 0, *n);
- return Status::OK();
- }
- auto null_col = ColumnUInt8::create();
- auto null_col_ptr = null_col->get_ptr();
- size_t read_rows = *n;
- bool has_null = false;
- RETURN_IF_ERROR(_null_iterator->next_batch(&read_rows, null_col_ptr,
&has_null));
- *n = read_rows;
- null_map.resize(read_rows);
- memcpy(null_map.data(), null_col->get_data().data(), read_rows);
- return Status::OK();
-}
-
////////////////////////////////////////////////////////////////////////////////
StructFileColumnIterator::StructFileColumnIterator(
@@ -1490,27 +1469,6 @@ Status StructFileColumnIterator::set_access_paths(
return Status::OK();
}
-Status StructFileColumnIterator::read_null_map(size_t* n, NullMap& null_map) {
- if (!_struct_reader->is_nullable()) {
- return Status::InternalError("read_null_map is not supported for
non-nullable column");
- }
- if (!_null_iterator) {
- // Schema-change scenario: column became nullable but old segment has
no null data.
- null_map.resize(*n);
- memset(null_map.data(), 0, *n);
- return Status::OK();
- }
- auto null_col = ColumnUInt8::create();
- auto null_col_ptr = null_col->get_ptr();
- size_t read_rows = *n;
- bool has_null = false;
- RETURN_IF_ERROR(_null_iterator->next_batch(&read_rows, null_col_ptr,
&has_null));
- *n = read_rows;
- null_map.resize(read_rows);
- memcpy(null_map.data(), null_col->get_data().data(), read_rows);
- return Status::OK();
-}
-
////////////////////////////////////////////////////////////////////////////////
Status OffsetFileColumnIterator::init(const ColumnIteratorOptions& opts) {
RETURN_IF_ERROR(_offset_iterator->init(opts));
@@ -1769,27 +1727,6 @@ Status ArrayFileColumnIterator::set_access_paths(const
TColumnAccessPaths& all_a
return Status::OK();
}
-Status ArrayFileColumnIterator::read_null_map(size_t* n, NullMap& null_map) {
- if (!_array_reader->is_nullable()) {
- return Status::InternalError("read_null_map is not supported for
non-nullable column");
- }
- if (!_null_iterator) {
- // Schema-change scenario: column became nullable but old segment has
no null data.
- null_map.resize(*n);
- memset(null_map.data(), 0, *n);
- return Status::OK();
- }
- auto null_col = ColumnUInt8::create();
- auto null_col_ptr = null_col->get_ptr();
- size_t read_rows = *n;
- bool has_null = false;
- RETURN_IF_ERROR(_null_iterator->next_batch(&read_rows, null_col_ptr,
&has_null));
- *n = read_rows;
- null_map.resize(read_rows);
- memcpy(null_map.data(), null_col->get_data().data(), read_rows);
- return Status::OK();
-}
-
////////////////////////////////////////////////////////////////////////////////
FileColumnIterator::FileColumnIterator(std::shared_ptr<ColumnReader> reader) :
_reader(reader) {}
@@ -1964,53 +1901,6 @@ Status FileColumnIterator::next_batch(size_t* n,
MutableColumnPtr& dst, bool* ha
return Status::OK();
}
-Status FileColumnIterator::read_null_map(size_t* n, NullMap& null_map) {
- if (!_reader->is_nullable()) {
- return Status::InternalError("read_null_map is not supported for
non-nullable column");
- }
-
- null_map.resize(*n);
- size_t remaining = *n;
- size_t offset = 0;
-
- while (remaining > 0) {
- if (!_page.has_remaining()) {
- bool eos = false;
- RETURN_IF_ERROR(_load_next_page(&eos));
- if (eos) {
- break;
- }
- }
-
- if (!_page.has_null) {
- size_t nrows_in_page = std::min(remaining, _page.remaining());
- memset(null_map.data() + offset, 0, nrows_in_page);
- offset += nrows_in_page;
- _current_ordinal += nrows_in_page;
- _page.offset_in_page += nrows_in_page;
- remaining -= nrows_in_page;
- continue;
- }
-
- size_t nrows_in_page = std::min(remaining, _page.remaining());
- size_t this_run = 0;
- while (this_run < nrows_in_page) {
- bool is_null = false;
- size_t run_len = _page.null_decoder.GetNextRun(&is_null,
nrows_in_page - this_run);
- memset(null_map.data() + offset + this_run, is_null ? 1 : 0,
run_len);
- this_run += run_len;
- }
-
- offset += nrows_in_page;
- _page.offset_in_page += nrows_in_page;
- _current_ordinal += nrows_in_page;
- remaining -= nrows_in_page;
- }
-
- *n = offset;
- return Status::OK();
-}
-
Status FileColumnIterator::read_by_rowids(const rowid_t* rowids, const size_t
count,
MutableColumnPtr& dst) {
if (_reading_flag == ReadingFlag::SKIP_READING) {
diff --git a/be/src/storage/segment/column_reader.h
b/be/src/storage/segment/column_reader.h
index 4571c54c106..57a4bc4d74a 100644
--- a/be/src/storage/segment/column_reader.h
+++ b/be/src/storage/segment/column_reader.h
@@ -30,9 +30,8 @@
#include "common/config.h"
#include "common/logging.h"
-#include "common/status.h" // for Status
-#include "core/column/column_array.h" // ColumnArray
-#include "core/column/column_nullable.h" // NullMap
+#include "common/status.h" // for Status
+#include "core/column/column_array.h" // ColumnArray
#include "core/data_type/data_type.h"
#include "io/cache/cached_remote_file_reader.h"
#include "io/fs/file_reader_writer_fwd.h"
@@ -361,10 +360,6 @@ public:
virtual bool is_all_dict_encoding() const { return false; }
- virtual Status read_null_map(size_t* n, NullMap& null_map) {
- return Status::NotSupported("read_null_map not implemented");
- }
-
virtual Status set_access_paths(const TColumnAccessPaths& all_access_paths,
const TColumnAccessPaths&
predicate_access_paths) {
if (!predicate_access_paths.empty()) {
@@ -463,8 +458,6 @@ public:
bool is_all_dict_encoding() const override { return _is_all_dict_encoding;
}
- Status read_null_map(size_t* n, NullMap& null_map) override;
-
Status init_prefetcher(const SegmentPrefetchParams& params) override;
void collect_prefetchers(
std::map<PrefetcherInitMethod, std::vector<SegmentPrefetcher*>>&
prefetchers,
@@ -590,8 +583,6 @@ public:
void remove_pruned_sub_iterators() override;
- Status read_null_map(size_t* n, NullMap& null_map) override;
-
private:
std::shared_ptr<ColumnReader> _map_reader = nullptr;
ColumnIteratorUPtr _null_iterator;
@@ -633,8 +624,6 @@ public:
std::map<PrefetcherInitMethod, std::vector<SegmentPrefetcher*>>&
prefetchers,
PrefetcherInitMethod init_method) override;
- Status read_null_map(size_t* n, NullMap& null_map) override;
-
private:
std::shared_ptr<ColumnReader> _struct_reader = nullptr;
ColumnIteratorUPtr _null_iterator;
@@ -674,8 +663,6 @@ public:
std::map<PrefetcherInitMethod, std::vector<SegmentPrefetcher*>>&
prefetchers,
PrefetcherInitMethod init_method) override;
- Status read_null_map(size_t* n, NullMap& null_map) override;
-
private:
std::shared_ptr<ColumnReader> _array_reader = nullptr;
std::unique_ptr<OffsetFileColumnIterator> _offset_iterator;
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
index 4100fab5c23..48bf546ef6b 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java
@@ -1358,9 +1358,6 @@ public class PhysicalPlanTranslator extends
DefaultPlanVisitor<PlanFragment, Pla
case MIX:
pushAggOp = TPushAggOp.MIX;
break;
- case COUNT_NULL:
- pushAggOp = TPushAggOp.COUNT_NULL;
- break;
default:
throw new AnalysisException("Unsupported storage layer
aggregate: "
+ storageLayerAggregate.getAggOp());
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java
index a9c4ac4acaa..c7e7612a246 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java
@@ -565,8 +565,7 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
Map<Class<? extends AggregateFunction>, PushDownAggOp> supportedAgg =
PushDownAggOp.supportedFunctions();
boolean containsCount = false;
- boolean containsCountStar = false;
- Set<SlotReference> countNullableSlots = new HashSet<>();
+ Set<SlotReference> checkNullSlots = new HashSet<>();
Set<Expression> expressionAfterProject = new HashSet<>();
// Single loop through aggregateFunctions to handle multiple logic
@@ -581,19 +580,15 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
// Check if contains Count function
if (functionClass.equals(Count.class)) {
containsCount = true;
- Count countFunc = (Count) function;
- if (countFunc.isCountStar()) {
- containsCountStar = true;
- }
if (!function.getArguments().isEmpty()) {
Expression arg0 = function.getArguments().get(0);
if (arg0 instanceof SlotReference) {
- countNullableSlots.add((SlotReference) arg0);
+ checkNullSlots.add((SlotReference) arg0);
expressionAfterProject.add(arg0);
} else if (arg0 instanceof Cast) {
Expression child0 = arg0.child(0);
if (child0 instanceof SlotReference) {
- countNullableSlots.add((SlotReference) child0);
+ checkNullSlots.add((SlotReference) child0);
expressionAfterProject.add(arg0);
}
}
@@ -657,7 +652,7 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
if (argument instanceof SlotReference) {
// Argument is valid, continue
if (needCheckSlotNull) {
- countNullableSlots.add((SlotReference) argument);
+ checkNullSlots.add((SlotReference) argument);
}
} else if (argument instanceof Cast) {
boolean castMatch = argument.child(0) instanceof
SlotReference
@@ -667,7 +662,7 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
return canNotPush;
} else {
if (needCheckSlotNull) {
- countNullableSlots.add((SlotReference)
argument.child(0));
+ checkNullSlots.add((SlotReference)
argument.child(0));
}
}
} else {
@@ -677,52 +672,20 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
argumentsOfAggregateFunction = processedExpressions;
}
+ Set<PushDownAggOp> pushDownAggOps = functionClasses.stream()
+ .map(supportedAgg::get)
+ .collect(Collectors.toSet());
+
+ PushDownAggOp mergeOp = pushDownAggOps.size() == 1
+ ? pushDownAggOps.iterator().next()
+ : PushDownAggOp.MIX;
+
Set<SlotReference> aggUsedSlots =
ExpressionUtils.collect(argumentsOfAggregateFunction,
SlotReference.class::isInstance);
List<SlotReference> usedSlotInTable = (List<SlotReference>)
Project.findProject(aggUsedSlots,
logicalScan.getOutput());
- boolean hasCountNullable = false;
- for (SlotReference slot : usedSlotInTable) {
- if (!slot.getOriginalColumn().isPresent()) {
- continue;
- }
- Column column = slot.getOriginalColumn().get();
- if (countNullableSlots.contains(slot) && column.isAllowNull()) {
- hasCountNullable = true;
- break;
- }
- }
-
- if (containsCountStar && hasCountNullable) {
- return canNotPush;
- }
-
- boolean hasMinMax = functionClasses.stream()
- .anyMatch(c -> c.equals(Min.class) || c.equals(Max.class));
-
- if (hasCountNullable && hasMinMax) {
- return canNotPush;
- }
-
- Set<PushDownAggOp> pushDownAggOps = new HashSet<>();
- for (Class<? extends AggregateFunction> functionClass :
functionClasses) {
- if (functionClass.equals(Count.class)) {
- if (hasCountNullable) {
- pushDownAggOps.add(PushDownAggOp.COUNT_NULL);
- } else {
- pushDownAggOps.add(PushDownAggOp.COUNT);
- }
- } else {
- pushDownAggOps.add(supportedAgg.get(functionClass));
- }
- }
-
- PushDownAggOp mergeOp = pushDownAggOps.size() == 1
- ? pushDownAggOps.iterator().next()
- : PushDownAggOp.MIX;
-
for (SlotReference slot : usedSlotInTable) {
Column column = slot.getOriginalColumn().get();
if (column.isAggregated()) {
@@ -740,6 +703,14 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
return canNotPush;
}
}
+ if (mergeOp == PushDownAggOp.COUNT || mergeOp ==
PushDownAggOp.MIX) {
+ // NULL value behavior in `count` function is zero, so
+ // we should not use row_count to speed up query. the col
+ // must be not null
+ if (column.isAllowNull() && checkNullSlots.contains(slot)) {
+ return canNotPush;
+ }
+ }
}
if (logicalScan instanceof LogicalOlapScan) {
@@ -760,11 +731,6 @@ public class AggregateStrategies implements
ImplementationRuleFactory {
}
} else if (logicalScan instanceof LogicalFileScan) {
- // COUNT_NULL requires reading nullmaps from segment files, which
is not supported
- // by external file scans (parquet/orc etc.), so we refuse to push
it down here.
- if (mergeOp == PushDownAggOp.COUNT_NULL) {
- return canNotPush;
- }
Rule rule = (logicalScan instanceof LogicalHudiScan) ? new
LogicalHudiScanToPhysicalHudiScan().build()
: new LogicalFileScanToPhysicalFileScan().build();
PhysicalFileScan physicalScan = (PhysicalFileScan)
rule.transform(logicalScan, cascadesContext)
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalStorageLayerAggregate.java
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalStorageLayerAggregate.java
index 9c7baa60f53..2db9c1afc27 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalStorageLayerAggregate.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalStorageLayerAggregate.java
@@ -110,7 +110,7 @@ public class PhysicalStorageLayerAggregate extends
PhysicalCatalogRelation {
/** PushAggOp */
public enum PushDownAggOp {
- COUNT, MIN_MAX, MIX, COUNT_ON_MATCH, COUNT_NULL;
+ COUNT, MIN_MAX, MIX, COUNT_ON_MATCH;
/** supportedFunctions */
public static Map<Class<? extends AggregateFunction>, PushDownAggOp>
supportedFunctions() {
diff --git a/gensrc/thrift/PlanNodes.thrift b/gensrc/thrift/PlanNodes.thrift
index 7328411d9ec..4d31fe81b36 100644
--- a/gensrc/thrift/PlanNodes.thrift
+++ b/gensrc/thrift/PlanNodes.thrift
@@ -863,8 +863,7 @@ enum TPushAggOp {
MINMAX = 1,
COUNT = 2,
MIX = 3,
- COUNT_ON_INDEX = 4,
- COUNT_NULL = 5
+ COUNT_ON_INDEX = 4
}
struct TScoreRangeInfo {
diff --git a/regression-test/data/query_p0/aggregate/count_null_push_down.out
b/regression-test/data/query_p0/aggregate/count_null_push_down.out
deleted file mode 100644
index e858bc64ef5..00000000000
--- a/regression-test/data/query_p0/aggregate/count_null_push_down.out
+++ /dev/null
@@ -1,10 +0,0 @@
--- This file is automatically generated. You should know what you did if you
want to edit this
--- !count_null --
-3
-
--- !count_star --
-5
-
--- !count_non_null --
-5
-
diff --git
a/regression-test/suites/nereids_p0/explain/test_pushdown_explain.groovy
b/regression-test/suites/nereids_p0/explain/test_pushdown_explain.groovy
index 8dfecc34b59..220ab4038c5 100644
--- a/regression-test/suites/nereids_p0/explain/test_pushdown_explain.groovy
+++ b/regression-test/suites/nereids_p0/explain/test_pushdown_explain.groovy
@@ -114,7 +114,7 @@ suite("test_pushdown_explain") {
}
explain {
sql("select count(a) from (select nullable_col as a from
test_null_columns) t1;")
- contains "pushAggOp=COUNT_NULL"
+ contains "pushAggOp=NONE"
}
explain {
sql("select count(a), min(a) from (select non_nullable_col as a from
test_null_columns) t1;")
diff --git
a/regression-test/suites/query_p0/aggregate/count_null_push_down.groovy
b/regression-test/suites/query_p0/aggregate/count_null_push_down.groovy
deleted file mode 100644
index 4a51e951237..00000000000
--- a/regression-test/suites/query_p0/aggregate/count_null_push_down.groovy
+++ /dev/null
@@ -1,44 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("count_null_push_down") {
- def tableName = "count_null_push_down_test"
-
- sql """ DROP TABLE IF EXISTS ${tableName} """
- sql """
- CREATE TABLE IF NOT EXISTS ${tableName} (
- id INT NOT NULL,
- value INT NULL
- )
- DUPLICATE KEY(id)
- DISTRIBUTED BY HASH(id) BUCKETS 1
- PROPERTIES (
- "replication_num" = "1"
- )
- """
-
- sql """ INSERT INTO ${tableName} VALUES (1, 1), (2, 2), (3, null), (4, 4),
(5, null) """
-
- // Test COUNT(column) on nullable column - should use COUNT_NULL push down
- qt_count_null """ SELECT COUNT(value) FROM ${tableName} """
-
- // Test COUNT(*) - should use COUNT push down
- qt_count_star """ SELECT COUNT(*) FROM ${tableName} """
-
- // Test COUNT on non-nullable column - should use COUNT push down
- qt_count_non_null """ SELECT COUNT(id) FROM ${tableName} """
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]