This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 059f005bfd6 branch-3.1: [fix](case) fix some case #54481 (#54562)
059f005bfd6 is described below

commit 059f005bfd61e606a09e5544d4154f6cd4ae8210
Author: meiyi <[email protected]>
AuthorDate: Wed Aug 13 12:06:35 2025 +0800

    branch-3.1: [fix](case) fix some case #54481 (#54562)
    
    pick #54481
---
 .../data/insert_p0/insert_group_commit_into.out    | Bin 15760 -> 15670 bytes
 .../txn_insert_restart_fe_with_schema_change.out   | Bin 344 -> 344 bytes
 .../group_commit/replay_wal_restart_fe.groovy      |   3 +-
 .../insert_p0/insert_group_commit_into.groovy      |  58 +--------------------
 .../insert_p0/test_group_commit_timeout.groovy     |  14 ++---
 ...txn_insert_restart_fe_with_schema_change.groovy |   6 +--
 ...t_unique_schema_key_change_multi_tablets.groovy |   2 +-
 7 files changed, 13 insertions(+), 70 deletions(-)

diff --git a/regression-test/data/insert_p0/insert_group_commit_into.out b/regression-test/data/insert_p0/insert_group_commit_into.out
index 44e6a32fd56..640eca5d361 100644
Binary files a/regression-test/data/insert_p0/insert_group_commit_into.out and b/regression-test/data/insert_p0/insert_group_commit_into.out differ
diff --git a/regression-test/data/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.out b/regression-test/data/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.out
index 1d906c6bd61..1234a227bfe 100644
Binary files a/regression-test/data/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.out and b/regression-test/data/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.out differ
diff --git a/regression-test/suites/insert_p0/group_commit/replay_wal_restart_fe.groovy b/regression-test/suites/insert_p0/group_commit/replay_wal_restart_fe.groovy
index 33664f52f2b..ee31b9eda1a 100644
--- a/regression-test/suites/insert_p0/group_commit/replay_wal_restart_fe.groovy
+++ b/regression-test/suites/insert_p0/group_commit/replay_wal_restart_fe.groovy
@@ -61,9 +61,8 @@ suite("replay_wal_restart_fe", 'docker') {
         cluster.stopBackends()
         cluster.restartFrontends()
         sleep(30000)
-        context.reconnectFe()
-        check_schema_change('RUNNING')
         cluster.startBackends()
+        context.reconnectFe()
 
         // check schema change status and row count
         check_schema_change('FINISHED')
diff --git a/regression-test/suites/insert_p0/insert_group_commit_into.groovy b/regression-test/suites/insert_p0/insert_group_commit_into.groovy
index 57e40b5f2dd..b486c443881 100644
--- a/regression-test/suites/insert_p0/insert_group_commit_into.groovy
+++ b/regression-test/suites/insert_p0/insert_group_commit_into.groovy
@@ -387,29 +387,9 @@ suite("insert_group_commit_into") {
             "replication_allocation" = "tag.location.default: 1",
             "group_commit_interval_ms" = "200"
             );"""
-        sql """drop table if exists ${table_tmp};"""
-        sql """CREATE TABLE ${table_tmp} (
-            `dnt` varchar(200) NULL,
-            `ordernum` varchar(200) NULL,
-            `type` varchar(20) NULL,
-            `powers` double SUM NULL,
-            `p0` double REPLACE NULL,
-            `heatj` double SUM NULL,
-            `j0` double REPLACE NULL,
-            `heatg` double SUM NULL,
-            `g0` double REPLACE NULL,
-            `solar` double SUM NULL
-            ) ENGINE=OLAP
-            AGGREGATE KEY(`dnt`, `ordernum`, `type`)
-            COMMENT 'OLAP'
-            DISTRIBUTED BY HASH(`ordernum`) BUCKETS 1
-            PROPERTIES (
-            "replication_allocation" = "tag.location.default: 1",
-            "group_commit_interval_ms" = "200"
-            ); """
         sql """DROP MATERIALIZED VIEW IF EXISTS ods_zn_dnt_max1 ON ${table};"""
         createMV("""create materialized view ods_zn_dnt_max1 as
-            select ordernum,max(dnt) as dnt from ${table}
+            select ordernum as a1,max(dnt) as dntm from ${table}
             group by ordernum
             ORDER BY ordernum;""")
         connect( context.config.jdbcUser, context.config.jdbcPassword, context.config.jdbcUrl) {
@@ -436,42 +416,6 @@ suite("insert_group_commit_into") {
 
             getRowCount(4)
 
-            qt_order """select
-                '2023-06-10',
-                tmp.ordernum,
-                cast(nvl(if(tmp.p0-tmp1.p0>0,tmp.p0-tmp1.p0,tmp.p0-tmp.p1),0) as decimal(10,4)),
-                nvl(tmp.p0,0),
-                cast(nvl(if(tmp.j0-tmp1.j0>0,tmp.j0-tmp1.j0,tmp.j0-tmp.j1)*277.78,0) as decimal(10,4)),
-                nvl(tmp.j0,0),
-                cast(nvl(if(tmp.g0-tmp1.g0>0,tmp.g0-tmp1.g0,tmp.g0-tmp.g1)*277.78,0) as decimal(10,4)),
-                nvl(tmp.g0,0),
-                cast(nvl(tmp.solar,0) as decimal(20,4)),
-                'day'
-                from 
-                (
-                select
-                    ordernum,
-                    max(ljrl1) g0,min(ljrl1) g1,
-                    max(ljrl2) j0,min(ljrl2) j1,
-                    max(db1) p0,min(db1) p1,
-                    max(fzl)*1600*0.278 solar
-                from(
-                    select ordernum,dnt,
-                            cast(if(json_extract(data,'\$.LJRL1')=0 or json_extract(data,'\$.LJRL1') like '%E%',null,json_extract(data,'\$.LJRL1')) as double) ljrl1,
-                            cast(if(json_extract(data,'\$.LJRL2')=0 or json_extract(data,'\$.LJRL2') like '%E%',null,json_extract(data,'\$.LJRL2')) as double) ljrl2,
-                            first_value(cast(if(json_extract(data,'\$.FZL')=0 or json_extract(data,'\$.FZL') like '%E%',null,
-                            json_extract(data,'\$.FZL')) as double)) over (partition by ordernum order by dnt desc) fzl,
-                            cast(if(json_extract(data,'\$.DB1')=0 or json_extract(data,'\$.DB1') like '%E%',null,json_extract(data,'\$.DB1')) as double) db1
-                    from ${table}
-                        )a1
-                group by ordernum
-                )tmp left join (
-                select
-                    ordernum,MAX(p0) p0,MAX(j0) j0,MAX(g0) g0
-                from ${table_tmp}
-                    group by ordernum
-                )tmp1
-                on tmp.ordernum=tmp1.ordernum;"""
             qt_order2 """
                 SELECT  
                 row_number() over(partition by add_date order by pc_num desc)
diff --git a/regression-test/suites/insert_p0/test_group_commit_timeout.groovy b/regression-test/suites/insert_p0/test_group_commit_timeout.groovy
index add4d3c1eec..e33f6b7c922 100644
--- a/regression-test/suites/insert_p0/test_group_commit_timeout.groovy
+++ b/regression-test/suites/insert_p0/test_group_commit_timeout.groovy
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-suite("test_group_commit_timeout", "nonConcurrent") {
+suite("test_group_commit_timeout") {
     def tableName = "test_group_commit_timeout"
     sql """
         CREATE TABLE if not exists ${tableName} (
@@ -31,14 +31,14 @@ suite("test_group_commit_timeout", "nonConcurrent") {
         );
     """
 
-    def query_timeout = sql """show variables where variable_name = 'query_timeout';"""
+    /*def query_timeout = sql """show variables where variable_name = 'query_timeout';"""
     def insert_timeout = sql """show variables where variable_name = 'insert_timeout';"""
-    logger.info("query_timeout: ${query_timeout}, insert_timeout: ${insert_timeout}")
+    logger.info("query_timeout: ${query_timeout}, insert_timeout: ${insert_timeout}")*/
 
     long start = System.currentTimeMillis()
     try {
-        sql "SET global query_timeout = 5"
-        sql "SET global insert_timeout = 5"
+        sql "SET query_timeout = 5"
+        sql "SET insert_timeout = 5"
 
         sql "set group_commit = sync_mode"
         sql "insert into ${tableName} values(1, 'a', 10)"
@@ -49,7 +49,7 @@ suite("test_group_commit_timeout", "nonConcurrent") {
         assertTrue(e.getMessage().contains("FragmentMgr cancel worker going to cancel timeout instance") || e.getMessage().contains("Execute timeout") || e.getMessage().contains("timeout"))
         assertTrue(end - start <= 60000)
     } finally {
-        sql "SET global query_timeout = ${query_timeout[0][1]}"
-        sql "SET global insert_timeout = ${insert_timeout[0][1]}"
+        /*sql "SET query_timeout = ${query_timeout[0][1]}"
+        sql "SET insert_timeout = ${insert_timeout[0][1]}"*/
     }
 }
diff --git a/regression-test/suites/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.groovy b/regression-test/suites/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.groovy
index d2537bfe8c6..89f3905280a 100644
--- a/regression-test/suites/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.groovy
+++ b/regression-test/suites/insert_p0/transaction/txn_insert_restart_fe_with_schema_change.groovy
@@ -80,11 +80,11 @@ suite("txn_insert_restart_fe_with_schema_change", 'docker') {
         }
 
         result = sql_return_maparray 'SHOW PROC "/transactions"'
-        runningTxn = result.find { it.DbName.indexOf(dbName) >= 0 }.RunningTransactionNum as int
+        def runningTxn = result.find { it.DbName.indexOf(dbName) >= 0 }.RunningTransactionNum as int
         assertEquals(4, runningTxn)
 
         sql "ALTER TABLE tbl_2 ADD COLUMN k3 INT DEFAULT '-1'"
-        sql 'CREATE MATERIALIZED VIEW tbl_3_mv AS SELECT k1, k1 + k2 FROM tbl_3'
+        sql 'CREATE MATERIALIZED VIEW tbl_3_mv AS SELECT k1 + 1, k1 + k2 FROM tbl_3'
         sql 'ALTER TABLE tbl_4 ADD ROLLUP tbl_3_r1(k1, v)'
         sql 'ALTER TABLE tbl_5 ORDER BY (k2, k1)'
 
@@ -112,7 +112,7 @@ suite("txn_insert_restart_fe_with_schema_change", 'docker') {
         // should publish visible
         order_qt_select_2 'SELECT k1, k2 FROM tbl_2'
         order_qt_select_3 'SELECT * FROM tbl_3'
-        order_qt_select_3m 'SELECT k1, k1 + k2 FROM tbl_3'
+        order_qt_select_3m 'SELECT k1 + 1, k1 + k2 FROM tbl_3'
         order_qt_select_4 'SELECT * FROM tbl_4'
         order_qt_select_4r 'SELECT k1, v FROM tbl_4'
         order_qt_select_5 'SELECT k1, k2 FROM tbl_5'
diff --git a/regression-test/suites/schema_change_p0/test_unique_schema_key_change_multi_tablets.groovy b/regression-test/suites/schema_change_p0/test_unique_schema_key_change_multi_tablets.groovy
index fb88ae6a739..29288f4e8f8 100644
--- a/regression-test/suites/schema_change_p0/test_unique_schema_key_change_multi_tablets.groovy
+++ b/regression-test/suites/schema_change_p0/test_unique_schema_key_change_multi_tablets.groovy
@@ -32,7 +32,6 @@ suite("test_unique_schema_key_change_multi_tablets","docker") {
      options.msConfigs.add('enable_retry_txn_conflict=false')
 
      def tbName = "test_unique_schema_key_change_multi_tablets"
-     sql """ DROP TABLE IF EXISTS ${tbName} """
      def initTable = " CREATE TABLE `${tbName}` (\n" +
              "  `source` int NOT NULL,\n" +
              "  `data_region` varchar(6) NULL,\n" +
@@ -372,6 +371,7 @@ suite("test_unique_schema_key_change_multi_tablets","docker") {
              "PROPERTIES (\"enable_unique_key_merge_on_write\" = \"true\");"
 
      docker(options) {
+          sql """ DROP TABLE IF EXISTS ${tbName} """
           sql initTable
           sql """  alter table ${tbName} add column p_order_ivr_version 
varchar(200) KEY NULL after p_open_ivr_version """
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
