This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 968e33f07e3 [cherry-pick](branch-21) pick (#39057) (#41352) (#41958)
968e33f07e3 is described below

commit 968e33f07e34d4b1252271ab4fe748a2633f95e9
Author: zhangstar333 <[email protected]>
AuthorDate: Thu Oct 17 14:30:40 2024 +0800

    [cherry-pick](branch-21) pick (#39057) (#41352) (#41958)
    
    ## Proposed changes
    
    pick from master (#39057) (#41352)
    
    ---------
    
    Co-authored-by: Mryange <[email protected]>
---
 be/src/common/config.cpp                           |  2 -
 be/src/common/config.h                             |  4 --
 .../pipeline/exec/partition_sort_sink_operator.cpp | 69 ++++++++++++++++++----
 .../pipeline/exec/partition_sort_sink_operator.h   | 10 +++-
 .../exec/partition_sort_source_operator.cpp        | 12 ++--
 .../pipeline/exec/partition_sort_source_operator.h |  6 +-
 be/src/runtime/runtime_state.h                     | 12 ++++
 be/src/vec/exec/vpartition_sort_node.cpp           |  4 +-
 .../java/org/apache/doris/qe/SessionVariable.java  | 20 +++++++
 gensrc/thrift/PaloInternalService.thrift           |  2 +
 .../external_table_p0/jdbc/test_jdbc_query_pg.out  |  6 +-
 .../jdbc/test_jdbc_query_pg.groovy                 |  2 +-
 12 files changed, 116 insertions(+), 33 deletions(-)

diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp
index 94418cc8357..6dff0a28dbc 100644
--- a/be/src/common/config.cpp
+++ b/be/src/common/config.cpp
@@ -1147,8 +1147,6 @@ DEFINE_mInt32(publish_version_gap_logging_threshold, "200");
 // The secure path with user files, used in the `local` table function.
 DEFINE_mString(user_files_secure_path, "${DORIS_HOME}");
 
-DEFINE_Int32(partition_topn_partition_threshold, "1024");
-
 DEFINE_Int32(fe_expire_duration_seconds, "60");
 
 DEFINE_Int32(grace_shutdown_wait_seconds, "120");
diff --git a/be/src/common/config.h b/be/src/common/config.h
index 384b1bd95ab..a282abb37ee 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -1214,10 +1214,6 @@ DECLARE_mInt32(publish_version_gap_logging_threshold);
 // The secure path with user files, used in the `local` table function.
 DECLARE_mString(user_files_secure_path);
 
-// This threshold determines how many partitions will be allocated for window function get topn.
-// and if this threshold is exceeded, the remaining data will be pass through to other node directly.
-DECLARE_Int32(partition_topn_partition_threshold);
-
 // If fe's frontend info has not been updated for more than fe_expire_duration_seconds, it will be regarded
 // as an abnormal fe, this will cause be to cancel this fe's related query.
 DECLARE_Int32(fe_expire_duration_seconds);
diff --git a/be/src/pipeline/exec/partition_sort_sink_operator.cpp b/be/src/pipeline/exec/partition_sort_sink_operator.cpp
index cbc2e3ee2eb..fdab0dc275b 100644
--- a/be/src/pipeline/exec/partition_sort_sink_operator.cpp
+++ b/be/src/pipeline/exec/partition_sort_sink_operator.cpp
@@ -17,6 +17,8 @@
 
 #include "partition_sort_sink_operator.h"
 
+#include <cstdint>
+
 #include "common/status.h"
 #include "partition_sort_source_operator.h"
 #include "vec/common/hash_table/hash.h"
@@ -38,19 +40,28 @@ Status PartitionSortSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo
     for (size_t i = 0; i < p._partition_expr_ctxs.size(); i++) {
         RETURN_IF_ERROR(p._partition_expr_ctxs[i]->clone(state, _partition_expr_ctxs[i]));
     }
+    _topn_phase = p._topn_phase;
     _partition_exprs_num = p._partition_exprs_num;
     _partitioned_data = std::make_unique<vectorized::PartitionedHashMapVariants>();
     _agg_arena_pool = std::make_unique<vectorized::Arena>();
     _hash_table_size_counter = ADD_COUNTER(_profile, "HashTableSize", TUnit::UNIT);
+    _serialize_key_arena_memory_usage =
+            _profile->AddHighWaterMarkCounter("SerializeKeyArena", TUnit::BYTES, "MemoryUsage", 1);
+    _hash_table_memory_usage =
+            ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "HashTable", TUnit::BYTES, "MemoryUsage", 1);
     _build_timer = ADD_TIMER(_profile, "HashTableBuildTime");
     _selector_block_timer = ADD_TIMER(_profile, "SelectorBlockTime");
     _emplace_key_timer = ADD_TIMER(_profile, "EmplaceKeyTime");
     _passthrough_rows_counter = ADD_COUNTER(_profile, "PassThroughRowsCounter", TUnit::UNIT);
+    _sorted_partition_input_rows_counter =
+            ADD_COUNTER(_profile, "SortedPartitionInputRows", TUnit::UNIT);
     _partition_sort_info = std::make_shared<vectorized::PartitionSortInfo>(
             &_vsort_exec_exprs, p._limit, 0, p._pool, p._is_asc_order, p._nulls_first,
             p._child_x->row_desc(), state, _profile, p._has_global_limit, p._partition_inner_limit,
             p._top_n_algorithm, p._topn_phase);
     _init_hash_method();
+    _profile->add_info_string("PartitionTopNPhase", to_string(p._topn_phase));
+    _profile->add_info_string("PartitionTopNLimit", std::to_string(p._partition_inner_limit));
     return Status::OK();
 }
 
@@ -111,14 +122,9 @@ Status PartitionSortSinkOperatorX::sink(RuntimeState* state, vectorized::Block*
             }
             local_state._value_places[0]->append_whole_block(input_block, _child_x->row_desc());
         } else {
-            //just simply use partition num to check
-            //if is TWO_PHASE_GLOBAL, must be sort all data thought partition num threshold have been exceeded.
-            if (_topn_phase != TPartTopNPhase::TWO_PHASE_GLOBAL &&
-                local_state._num_partition > config::partition_topn_partition_threshold &&
-                local_state.child_input_rows < 10000 * local_state._num_partition) {
+            if (local_state._is_need_passthrough) {
                 {
-                    COUNTER_UPDATE(local_state._passthrough_rows_counter,
-                                   (int64_t)input_block->rows());
+                    COUNTER_UPDATE(local_state._passthrough_rows_counter, (int64_t)current_rows);
                     std::lock_guard<std::mutex> lock(local_state._shared_state->buffer_mutex);
                     local_state._shared_state->blocks_buffer.push(std::move(*input_block));
                     // buffer have data, source could read this.
@@ -128,7 +134,6 @@ Status PartitionSortSinkOperatorX::sink(RuntimeState* state, vectorized::Block*
                 RETURN_IF_ERROR(_split_block_by_partition(input_block, local_state, eos));
                 RETURN_IF_CANCELLED(state);
                 input_block->clear_column_data();
-                local_state.child_input_rows = local_state.child_input_rows + current_rows;
             }
         }
     }
@@ -151,12 +156,16 @@ Status PartitionSortSinkOperatorX::sink(RuntimeState* state, vectorized::Block*
         }
 
         COUNTER_SET(local_state._hash_table_size_counter, int64_t(local_state._num_partition));
+        COUNTER_SET(local_state._sorted_partition_input_rows_counter,
+                    local_state._sorted_partition_input_rows);
         //so all data from child have sink completed
         {
             std::unique_lock<std::mutex> lc(local_state._shared_state->sink_eos_lock);
             local_state._shared_state->sink_eos = true;
             local_state._dependency->set_ready_to_read();
         }
+        local_state._profile->add_info_string("HasPassThrough",
+                                              local_state._is_need_passthrough ? "Yes" : "No");
     }
 
     return Status::OK();
@@ -177,7 +186,7 @@ Status PartitionSortSinkOperatorX::_split_block_by_partition(
 }
 
 Status PartitionSortSinkOperatorX::_emplace_into_hash_table(
-        const vectorized::ColumnRawPtrs& key_columns, const vectorized::Block* input_block,
+        const vectorized::ColumnRawPtrs& key_columns, vectorized::Block* input_block,
         PartitionSortSinkLocalState& local_state, bool eos) {
     return std::visit(
             vectorized::Overload {
@@ -212,15 +221,39 @@ Status PartitionSortSinkOperatorX::_emplace_into_hash_table(
                         };
 
                         SCOPED_TIMER(local_state._emplace_key_timer);
-                        for (size_t row = 0; row < num_rows; ++row) {
+                        int row = num_rows;
+                        for (row = row - 1; row >= 0 && !local_state._is_need_passthrough; --row) {
                             auto& mapped = agg_method.lazy_emplace(state, row, creator,
                                                                    creator_for_null_key);
                             mapped->add_row_idx(row);
+                            local_state._sorted_partition_input_rows++;
+                            local_state._is_need_passthrough =
+                                    local_state.check_whether_need_passthrough();
                         }
                         for (auto* place : local_state._value_places) {
                             SCOPED_TIMER(local_state._selector_block_timer);
                             RETURN_IF_ERROR(place->append_block_by_selector(input_block, eos));
                         }
+                        // Perform passthrough for the range [0, row] of input_block
+                        if (local_state._is_need_passthrough && row >= 0) {
+                            {
+                                COUNTER_UPDATE(local_state._passthrough_rows_counter,
+                                               (int64_t)(row + 1));
+                                std::lock_guard<std::mutex> lock(
+                                        local_state._shared_state->buffer_mutex);
+                                // (num_rows - row) rows have been emplaced into the hash table, and row + 1 rows remain in the block;
+                                // set_num_rows(x) retains the range [0, x - 1], so row + 1 is needed here.
+                                input_block->set_num_rows(row + 1);
+                                local_state._shared_state->blocks_buffer.push(
+                                        std::move(*input_block));
+                                // buffer have data, source could read this.
+                                local_state._dependency->set_ready_to_read();
+                            }
+                        }
+                        local_state._serialize_key_arena_memory_usage->set(
+                                (int64_t)local_state._agg_arena_pool->size());
+                        COUNTER_SET(local_state._hash_table_memory_usage,
+                                    (int64_t)agg_method.hash_table->get_buffer_size_in_bytes());
                         return Status::OK();
                     }},
             local_state._partitioned_data->method_variant);
@@ -302,4 +335,20 @@ void PartitionSortSinkLocalState::_init_hash_method() {
     }
 }
 
+// NOLINTBEGIN(readability-simplify-boolean-expr)
+// Just use the partition num to decide whether to pass data through.
+// But if the phase is TWO_PHASE_GLOBAL, all data must be sorted even though the partition num threshold has been exceeded.
+// partition_topn_max_partitions     default is : 1024
+// partition_topn_per_partition_rows default is : 1000
+bool PartitionSortSinkLocalState::check_whether_need_passthrough() {
+    if (_topn_phase != TPartTopNPhase::TWO_PHASE_GLOBAL &&
+        _num_partition > _state->partition_topn_max_partitions() &&
+        _sorted_partition_input_rows <
+                _state->partition_topn_per_partition_rows() * _num_partition) {
+        return true;
+    }
+    return false;
+}
+// NOLINTEND(readability-simplify-boolean-expr)
+
 } // namespace doris::pipeline
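
For readers skimming the diff: the rule added above is small enough to state on its own. Passthrough kicks in only outside the TWO_PHASE_GLOBAL phase, once more than partition_topn_max_partitions partitions have been seen while the rows sorted so far are still below partition_topn_per_partition_rows * num_partition; when the per-row check trips at index row, the remaining rows [0, row] of the current block (row + 1 rows, hence set_num_rows(row + 1)) are forwarded to the source unsorted. Below is a minimal standalone sketch of that rule, using illustrative names (TopNPhase, PassthroughPolicy) rather than the real Doris classes, with the defaults (1024 / 1000) introduced by this commit; it compiles as plain C++14 and is not the committed implementation.

    // Sketch of the passthrough decision added in partition_sort_sink_operator.cpp.
    #include <cassert>
    #include <cstdint>

    enum class TopNPhase { ONE_PHASE_GLOBAL, TWO_PHASE_LOCAL, TWO_PHASE_GLOBAL };

    struct PassthroughPolicy {
        TopNPhase phase = TopNPhase::ONE_PHASE_GLOBAL;
        int64_t max_partitions = 1024;      // partition_topn_max_partitions
        int64_t per_partition_rows = 1000;  // partition_topn_pre_partition_rows

        bool need_passthrough(int64_t num_partition, int64_t sorted_input_rows) const {
            // Many small partitions and few sorted rows -> sorting is not paying off.
            return phase != TopNPhase::TWO_PHASE_GLOBAL &&
                   num_partition > max_partitions &&
                   sorted_input_rows < per_partition_rows * num_partition;
        }
    };

    int main() {
        PassthroughPolicy policy;
        // Few partitions: keep sorting, no matter how many rows arrived.
        assert(!policy.need_passthrough(/*num_partition=*/100, /*sorted_input_rows=*/5'000'000));
        // 2000 partitions but only 10k sorted rows: pass the rest through.
        assert(policy.need_passthrough(2000, 10'000));
        // Same shape, but the global phase of a two-phase plan must sort everything.
        policy.phase = TopNPhase::TWO_PHASE_GLOBAL;
        assert(!policy.need_passthrough(2000, 10'000));
        return 0;
    }
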
diff --git a/be/src/pipeline/exec/partition_sort_sink_operator.h b/be/src/pipeline/exec/partition_sort_sink_operator.h
index 8602b096f51..6ba090c0442 100644
--- a/be/src/pipeline/exec/partition_sort_sink_operator.h
+++ b/be/src/pipeline/exec/partition_sort_sink_operator.h
@@ -66,7 +66,7 @@ private:
     // Expressions and parameters used for build _sort_description
     vectorized::VSortExecExprs _vsort_exec_exprs;
     vectorized::VExprContextSPtrs _partition_expr_ctxs;
-    int64_t child_input_rows = 0;
+    int64_t _sorted_partition_input_rows = 0;
     std::vector<vectorized::PartitionDataPtr> _value_places;
     int _num_partition = 0;
     std::vector<const vectorized::IColumn*> _partition_columns;
@@ -74,13 +74,19 @@ private:
     std::unique_ptr<vectorized::Arena> _agg_arena_pool;
     int _partition_exprs_num = 0;
     std::shared_ptr<vectorized::PartitionSortInfo> _partition_sort_info = nullptr;
+    TPartTopNPhase::type _topn_phase;
+    bool _is_need_passthrough = false;
 
     RuntimeProfile::Counter* _build_timer = nullptr;
     RuntimeProfile::Counter* _emplace_key_timer = nullptr;
     RuntimeProfile::Counter* _selector_block_timer = nullptr;
     RuntimeProfile::Counter* _hash_table_size_counter = nullptr;
     RuntimeProfile::Counter* _passthrough_rows_counter = nullptr;
+    RuntimeProfile::Counter* _sorted_partition_input_rows_counter = nullptr;
+    RuntimeProfile::Counter* _hash_table_memory_usage = nullptr;
+    RuntimeProfile::HighWaterMarkCounter* _serialize_key_arena_memory_usage = nullptr;
     void _init_hash_method();
+    bool check_whether_need_passthrough();
 };
 
 class PartitionSortSinkOperatorX final : public DataSinkOperatorX<PartitionSortSinkLocalState> {
@@ -124,7 +130,7 @@ private:
     Status _split_block_by_partition(vectorized::Block* input_block,
                                      PartitionSortSinkLocalState& local_state, bool eos);
     Status _emplace_into_hash_table(const vectorized::ColumnRawPtrs& key_columns,
-                                    const vectorized::Block* input_block,
+                                    vectorized::Block* input_block,
                                     PartitionSortSinkLocalState& local_state, bool eos);
 };
 
diff --git a/be/src/pipeline/exec/partition_sort_source_operator.cpp b/be/src/pipeline/exec/partition_sort_source_operator.cpp
index 7fd03a11f7a..7c89b6ba423 100644
--- a/be/src/pipeline/exec/partition_sort_source_operator.cpp
+++ b/be/src/pipeline/exec/partition_sort_source_operator.cpp
@@ -34,6 +34,8 @@ Status PartitionSortSourceLocalState::init(RuntimeState* state, LocalStateInfo&
     SCOPED_TIMER(exec_time_counter());
     SCOPED_TIMER(_open_timer);
     _get_sorted_timer = ADD_TIMER(profile(), "GetSortedTime");
+    _sorted_partition_output_rows_counter =
+            ADD_COUNTER(profile(), "SortedPartitionOutputRows", TUnit::UNIT);
     return Status::OK();
 }
 
@@ -62,7 +64,7 @@ Status PartitionSortSourceOperatorX::get_block(RuntimeState* state, vectorized::
             }
             if (!output_block->empty()) {
                 COUNTER_UPDATE(local_state.blocks_returned_counter(), 1);
-                COUNTER_UPDATE(local_state.rows_returned_counter(), output_block->rows());
+                local_state._num_rows_returned += output_block->rows();
             }
             return Status::OK();
         }
@@ -84,7 +86,7 @@ Status PartitionSortSourceOperatorX::get_block(RuntimeState* state, vectorized::
     }
     if (!output_block->empty()) {
         COUNTER_UPDATE(local_state.blocks_returned_counter(), 1);
-        COUNTER_UPDATE(local_state.rows_returned_counter(), output_block->rows());
+        local_state._num_rows_returned += output_block->rows();
     }
     return Status::OK();
 }
@@ -98,12 +100,10 @@ Status PartitionSortSourceOperatorX::get_sorted_block(RuntimeState* state,
     if (local_state._sort_idx < local_state._shared_state->partition_sorts.size()) {
         RETURN_IF_ERROR(local_state._shared_state->partition_sorts[local_state._sort_idx]->get_next(
                 state, output_block, &current_eos));
+        COUNTER_UPDATE(local_state._sorted_partition_output_rows_counter, output_block->rows());
     }
     if (current_eos) {
-        //current sort have eos, so get next idx
-        auto rows = local_state._shared_state->partition_sorts[local_state._sort_idx]
-                            ->get_output_rows();
-        local_state._num_rows_returned += rows;
+        // current sort have eos, so get next idx
         local_state._shared_state->partition_sorts[local_state._sort_idx].reset(nullptr);
         local_state._sort_idx++;
     }
diff --git a/be/src/pipeline/exec/partition_sort_source_operator.h b/be/src/pipeline/exec/partition_sort_source_operator.h
index 9d810db2039..ecc83c737c2 100644
--- a/be/src/pipeline/exec/partition_sort_source_operator.h
+++ b/be/src/pipeline/exec/partition_sort_source_operator.h
@@ -55,14 +55,14 @@ public:
     ENABLE_FACTORY_CREATOR(PartitionSortSourceLocalState);
     using Base = PipelineXLocalState<PartitionSortNodeSharedState>;
     PartitionSortSourceLocalState(RuntimeState* state, OperatorXBase* parent)
-            : PipelineXLocalState<PartitionSortNodeSharedState>(state, parent),
-              _get_sorted_timer(nullptr) {}
+            : PipelineXLocalState<PartitionSortNodeSharedState>(state, parent) {}
 
     Status init(RuntimeState* state, LocalStateInfo& info) override;
 
 private:
     friend class PartitionSortSourceOperatorX;
-    RuntimeProfile::Counter* _get_sorted_timer;
+    RuntimeProfile::Counter* _get_sorted_timer = nullptr;
+    RuntimeProfile::Counter* _sorted_partition_output_rows_counter = nullptr;
     std::atomic<int> _sort_idx = 0;
 };
 
diff --git a/be/src/runtime/runtime_state.h b/be/src/runtime/runtime_state.h
index 3c8d181f16f..732b7e3f396 100644
--- a/be/src/runtime/runtime_state.h
+++ b/be/src/runtime/runtime_state.h
@@ -536,6 +536,18 @@ public:
                        : 0;
     }
 
+    int partition_topn_max_partitions() const {
+        return _query_options.__isset.partition_topn_max_partitions
+                       ? _query_options.partition_topn_max_partitions
+                       : 1024;
+    }
+
+    int partition_topn_per_partition_rows() const {
+        return _query_options.__isset.partition_topn_pre_partition_rows
+                       ? _query_options.partition_topn_pre_partition_rows
+                       : 1000;
+    }
+
     int64_t parallel_scan_min_rows_per_scanner() const {
         return _query_options.__isset.parallel_scan_min_rows_per_scanner
                        ? _query_options.parallel_scan_min_rows_per_scanner
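
The two accessors added above follow the existing TQueryOptions convention in this file: read the field only when its __isset flag says it was forwarded, otherwise fall back to the default (1024 and 1000, matching the thrift definition and the new session variables). Below is a simplified, self-contained illustration of that fallback using mock structs (MockQueryOptions, MockRuntimeState are stand-ins, not the generated Thrift code); it is a sketch of the pattern, not the Doris implementation.

    // Sketch of the "__isset flag or default" fallback used by the new accessors.
    #include <cassert>

    struct MockQueryOptions {
        // Thrift-generated structs expose an "__isset" bitfield; modelled here as "isset".
        struct { bool partition_topn_max_partitions = false; } isset;
        int partition_topn_max_partitions = 0;
    };

    struct MockRuntimeState {
        MockQueryOptions _query_options;
        int partition_topn_max_partitions() const {
            return _query_options.isset.partition_topn_max_partitions
                           ? _query_options.partition_topn_max_partitions
                           : 1024; // default matches the thrift field and session variable
        }
    };

    int main() {
        MockRuntimeState state;
        assert(state.partition_topn_max_partitions() == 1024); // option not set -> default
        state._query_options.partition_topn_max_partitions = 64;
        state._query_options.isset.partition_topn_max_partitions = true;
        assert(state.partition_topn_max_partitions() == 64);   // session value wins
        return 0;
    }
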
diff --git a/be/src/vec/exec/vpartition_sort_node.cpp b/be/src/vec/exec/vpartition_sort_node.cpp
index 0c737a2def0..06ee130ec23 100644
--- a/be/src/vec/exec/vpartition_sort_node.cpp
+++ b/be/src/vec/exec/vpartition_sort_node.cpp
@@ -239,8 +239,8 @@ Status VPartitionSortNode::sink(RuntimeState* state, vectorized::Block* input_bl
             //just simply use partition num to check
             //if is TWO_PHASE_GLOBAL, must be sort all data thought partition num threshold have been exceeded.
             if (_topn_phase != TPartTopNPhase::TWO_PHASE_GLOBAL &&
-                _num_partition > config::partition_topn_partition_threshold &&
-                child_input_rows < 10000 * _num_partition) {
+                _num_partition > state->partition_topn_max_partitions() &&
+                child_input_rows < state->partition_topn_per_partition_rows() * _num_partition) {
                 {
                     std::lock_guard<std::mutex> lock(_buffer_mutex);
                     _blocks_buffer.push(std::move(*input_block));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
index e0ad520b9c6..79503ca77d8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
@@ -230,6 +230,8 @@ public class SessionVariable implements Serializable, Writable {
 
     public static final String MAX_JOIN_NUMBER_BUSHY_TREE = "max_join_number_bushy_tree";
     public static final String ENABLE_PARTITION_TOPN = "enable_partition_topn";
+    public static final String PARTITION_TOPN_MAX_PARTITIONS = "partition_topn_max_partitions";
+    public static final String PARTITION_TOPN_PER_PARTITION_ROWS = "partition_topn_pre_partition_rows";
 
     public static final String GLOBAL_PARTITION_TOPN_THRESHOLD = "global_partition_topn_threshold";
 
@@ -1196,6 +1198,22 @@ public class SessionVariable implements Serializable, Writable {
     @VariableMgr.VarAttr(name = ENABLE_PARTITION_TOPN)
     private boolean enablePartitionTopN = true;
 
+    @VariableMgr.VarAttr(name = PARTITION_TOPN_MAX_PARTITIONS, needForward = true, description = {
+            "这个阈值决定了partition_topn计算时的最大分区数量,超过这个阈值后且输入总行数少于预估总量,剩余的数据将直接透传给下一个算子",
+            "This threshold determines the maximum number of partitions allocated for the window function partition topn."
+                    + " If this threshold is exceeded and the input rows are less than the estimated total rows,"
+                    + " the remaining data will be passed through to the next operator directly."
+    })
+    private int partitionTopNMaxPartitions = 1024;
+
+    @VariableMgr.VarAttr(name = PARTITION_TOPN_PER_PARTITION_ROWS, needForward = true, description = {
+            "这个数值用于partition_topn预估每个分区的行数,用来计算所有分区的预估数据总量,决定是否能透传下一个算子",
+            "This value is used by partition_topn to estimate the number of rows in each partition, in order to"
+            + " calculate the estimated total amount of data for all partitions and decide whether the data"
+            + " can be passed through to the next operator."
+    })
+    private int partitionTopNPerPartitionRows = 1000;
+
     @VariableMgr.VarAttr(name = GLOBAL_PARTITION_TOPN_THRESHOLD)
     private double globalPartitionTopNThreshold = 100;
 
@@ -3503,6 +3521,8 @@ public class SessionVariable implements Serializable, Writable {
         tResult.setBatchSize(batchSize);
         tResult.setDisableStreamPreaggregations(disableStreamPreaggregations);
         tResult.setEnableDistinctStreamingAggregation(enableDistinctStreamingAggregation);
+        tResult.setPartitionTopnMaxPartitions(partitionTopNMaxPartitions);
+        tResult.setPartitionTopnPrePartitionRows(partitionTopNPerPartitionRows);
 
         if (maxScanKeyNum > -1) {
             tResult.setMaxScanKeyNum(maxScanKeyNum);
diff --git a/gensrc/thrift/PaloInternalService.thrift b/gensrc/thrift/PaloInternalService.thrift
index 1b3fc7cc063..aada77ba258 100644
--- a/gensrc/thrift/PaloInternalService.thrift
+++ b/gensrc/thrift/PaloInternalService.thrift
@@ -330,6 +330,8 @@ struct TQueryOptions {
   131: optional i32 adaptive_pipeline_task_serial_read_on_limit = 10000;
 
   132: optional i32 parallel_prepare_threshold = 0;
+  133: optional i32 partition_topn_max_partitions = 1024;
+  134: optional i32 partition_topn_pre_partition_rows = 1000;
   // For cloud, to control if the content would be written into file cache
   1000: optional bool disable_file_cache = false
 }
diff --git a/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out b/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out
index 06b0247814a..5fd5558295e 100644
--- a/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out
+++ b/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out
@@ -1338,9 +1338,9 @@ true      abc     def     2022-10-11      1.234   1       2       3       2022-10-22T10:59:59     34.123  true    abc     def     2022
 6
 
 -- !sql87 --
-1      3
-2      0
-3      1
+1      0
+2      1
+3      2
 
 -- !sql88 --
 1
diff --git a/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy b/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy
index ae2566c445c..10004ba374e 100644
--- a/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy
+++ b/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy
@@ -575,7 +575,7 @@ suite("test_jdbc_query_pg", "p0,external,pg,external_docker,external_docker_pg")
         order_qt_sql84 """ SELECT NULL, NULL INTERSECT SELECT NULL, NULL FROM $jdbcPg14Table1 """
         order_qt_sql85 """ SELECT COUNT(*) FROM $jdbcPg14Table1 INTERSECT SELECT COUNT(k8) FROM $jdbcPg14Table1 HAVING SUM(k7) IS NOT NULL """
         order_qt_sql86 """ SELECT k8 FROM $jdbcPg14Table1 WHERE k8 < 7 EXCEPT SELECT k8 FROM $jdbcPg14Table1 WHERE k8 > 21 """
-        order_qt_sql87 """ SELECT row_number() OVER (PARTITION BY k7) rn, k8 FROM $jdbcPg14Table1 LIMIT 3 """
+        order_qt_sql87 """ SELECT row_number() OVER (PARTITION BY k7 order by k8) rn, k8 FROM $jdbcPg14Table1 LIMIT 3 """
         order_qt_sql88 """ SELECT row_number() OVER (PARTITION BY k7 ORDER BY k8) rn FROM $jdbcPg14Table1 LIMIT 3 """
         order_qt_sql89 """ SELECT row_number() OVER (ORDER BY k8) rn FROM $jdbcPg14Table1 LIMIT 3 """
         order_qt_sql90 """ SELECT row_number() OVER () FROM $jdbcPg14Table1 as a JOIN ${dorisExTable1} as b ON a.k8 = b.id WHERE a.k8 > 111 LIMIT 2 """

