This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 589518ff72415275510947b603cfb98c34c7be08
Author: 924060929 <[email protected]>
AuthorDate: Tue May 28 20:06:01 2024 +0800

    [fix](Nereids) fix Illegal aggregate node: group by and output is empty 
(#35497)
    
    fix the "Illegal aggregate node: group by and output is empty" error,
    which was introduced by #33091.
---
 be/src/runtime/descriptors.cpp                            |  5 +++++
 .../suites/nereids_p0/aggregate/aggregate.groovy          | 15 +++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/be/src/runtime/descriptors.cpp b/be/src/runtime/descriptors.cpp
index 0d0ace54ae3..7f7aae0d2ca 100644
--- a/be/src/runtime/descriptors.cpp
+++ b/be/src/runtime/descriptors.cpp
@@ -342,10 +342,12 @@ RowDescriptor::RowDescriptor(const DescriptorTbl& 
desc_tbl, const std::vector<TT
             << row_tuples.size();
     DCHECK_GT(row_tuples.size(), 0);
     _num_materialized_slots = 0;
+    _num_slots = 0;
 
     for (int row_tuple : row_tuples) {
         TupleDescriptor* tupleDesc = desc_tbl.get_tuple_descriptor(row_tuple);
         _num_materialized_slots += tupleDesc->num_materialized_slots();
+        _num_slots += tupleDesc->slots().size();
         _tuple_desc_map.push_back(tupleDesc);
         DCHECK(_tuple_desc_map.back() != nullptr);
     }
@@ -358,6 +360,7 @@ RowDescriptor::RowDescriptor(TupleDescriptor* tuple_desc, 
bool is_nullable)
         : _tuple_desc_map(1, tuple_desc), _tuple_idx_nullable_map(1, 
is_nullable) {
     init_tuple_idx_map();
     init_has_varlen_slots();
+    _num_slots = tuple_desc->slots().size();
 }
 
 RowDescriptor::RowDescriptor(const RowDescriptor& lhs_row_desc, const 
RowDescriptor& rhs_row_desc) {
@@ -373,6 +376,8 @@ RowDescriptor::RowDescriptor(const RowDescriptor& 
lhs_row_desc, const RowDescrip
                                    rhs_row_desc._tuple_idx_nullable_map.end());
     init_tuple_idx_map();
     init_has_varlen_slots();
+
+    _num_slots = lhs_row_desc.num_slots() + rhs_row_desc.num_slots();
 }
 
 void RowDescriptor::init_tuple_idx_map() {
diff --git a/regression-test/suites/nereids_p0/aggregate/aggregate.groovy 
b/regression-test/suites/nereids_p0/aggregate/aggregate.groovy
index c69e80eca35..60601cee7ce 100644
--- a/regression-test/suites/nereids_p0/aggregate/aggregate.groovy
+++ b/regression-test/suites/nereids_p0/aggregate/aggregate.groovy
@@ -332,4 +332,19 @@ suite("aggregate") {
     qt_having_with_limit """
         select k1 as k, avg(k2) as k2  from tempbaseall group by k1 having k2 
< -32765 limit 1;
     """
+
+    sql "drop table if exists 
table_10_undef_partitions2_keys3_properties4_distributed_by5"
+
+    sql """create table 
table_10_undef_partitions2_keys3_properties4_distributed_by5 (
+            col_bigint_undef_signed bigint/*agg_type_placeholder*/   ,
+                    col_varchar_10__undef_signed 
varchar(10)/*agg_type_placeholder*/   ,
+            col_varchar_64__undef_signed varchar(64)/*agg_type_placeholder*/   
,
+                    pk int/*agg_type_placeholder*/
+    ) engine=olap
+    distributed by hash(pk) buckets 10
+    properties("replication_num" = "1")"""
+
+    sql "insert into 
table_10_undef_partitions2_keys3_properties4_distributed_by5(pk,col_bigint_undef_signed,col_varchar_10__undef_signed,col_varchar_64__undef_signed)
 values 
(0,111,'from','t'),(1,null,'h','out'),(2,3814,'get','q'),(3,5166561111626303305,'s','right'),(4,2688963514917402600,'b','hey'),(5,-5065987944147755706,'p','mean'),(6,31061,'v','d'),(7,122,'the','t'),(8,-2882446,'going','a'),(9,-43,'y','a');"
+
+    sql "SELECT MIN( `pk` ) FROM 
table_10_undef_partitions2_keys3_properties4_distributed_by5  WHERE ( 
col_varchar_64__undef_signed  LIKE CONCAT ('come' , '%' ) OR 
col_varchar_10__undef_signed  IN ( 'could' , 'was' , 'that' ) ) OR ( `pk` IS  
NULL OR  ( `pk` <> 186 ) ) AND ( `pk` IS NOT NULL OR `pk`  BETWEEN 255 AND -99 
+ 8 ) AND (  ( `pk` != 6 ) OR `pk` IS  NULL );"
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to