This is an automated email from the ASF dual-hosted git repository.

kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
     new 64a98eddf9e [cases](index compaction)Add index compaction cases (#29393) (#29425)
64a98eddf9e is described below

commit 64a98eddf9ea4811d5622341f34dc354d11b3b0c
Author: qiye <[email protected]>
AuthorDate: Tue Jan 2 20:30:44 2024 +0800

    [cases](index compaction)Add index compaction cases (#29393) (#29425)
---
 .../test_index_compaction_dup_keys.out             | 123 ++++++++
 .../test_index_compaction_unique_keys.out          |  69 +++++
 ..._index_compaction_with_multi_index_segments.out | 105 +++++++
 .../test_index_compaction_dup_keys.groovy          | 225 ++++++++++++++
 .../test_index_compaction_unique_keys.groovy       | 229 ++++++++++++++
 ...dex_compaction_with_multi_index_segments.groovy | 343 +++++++++++++--------
 6 files changed, 964 insertions(+), 130 deletions(-)
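
Note: the two new suites and the refactored multi-segment suite share the same helper closures (set_be_config, trigger_full_compaction_on_tablets, wait_full_compaction_done, get_rowset_count, check_config). A minimal sketch of the trigger-then-poll pattern they implement, assuming the regression framework's be_run_full_compaction and be_get_compaction_status helpers that appear in the diff below:

    // Sketch only: trigger full compaction on every tablet, then poll until done.
    // beIp/bePort map backend_id -> host/port, as built by getBackendIpHttpPort.
    def compact_and_wait = { String[][] tablets, Map beIp, Map bePort ->
        for (String[] tablet : tablets) {
            String tabletId = tablet[0]
            String backendId = tablet[2]
            def (code, out, err) = be_run_full_compaction(beIp.get(backendId), bePort.get(backendId), tabletId)
            assertEquals("success", parseJson(out.trim()).status.toLowerCase())
            boolean running = true
            while (running) {
                Thread.sleep(1000)
                (code, out, err) = be_get_compaction_status(beIp.get(backendId), bePort.get(backendId), tabletId)
                running = parseJson(out.trim()).run_status
            }
        }
    }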

diff --git a/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.out b/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.out
new file mode 100644
index 00000000000..11f5a6ed34c
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.out
@@ -0,0 +1,123 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      andy    andy love apple 100
+1      andy    andy love apple 100
+2      andy    andy love apple 100
+2      andy    andy love apple 100
+3      andy    andy love apple 100
+3      andy    andy love apple 100
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
diff --git a/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.out b/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.out
new file mode 100644
index 00000000000..3edad07916c
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.out
@@ -0,0 +1,69 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
+-- !sql --
+1      bason   bason hate pear 99
+2      bason   bason hate pear 99
+3      bason   bason hate pear 99
+
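
Note: the empty "-- !sql --" blocks above are expected. The unique-keys suite creates its table with "enable_unique_key_merge_on_write" = "true", so the second INSERT for each id (the bason row) replaces the andy row and `where name match "andy"` matches nothing. A minimal illustration of the upsert semantics these expectations rely on (the table name t is hypothetical):

    sql """ INSERT INTO t VALUES (1, "andy", "andy love apple", 100); """
    sql """ INSERT INTO t VALUES (1, "bason", "bason hate pear", 99); """  // same key: replaces the andy row
    qt_sql """ select * from t where name match "andy" """                 // expected result: empty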
diff --git a/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.out b/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.out
index 57ad3c1080c..4b086658735 100644
--- a/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.out
+++ b/regression-test/data/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.out
@@ -212,3 +212,108 @@
 2018-02-21T12:00       7       I'm using the builds
 2018-02-21T12:00       7       I'm using the builds
 
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+2018-02-21T19:00       8       I'm using the builds
+2018-02-21T20:00       9       I'm using the builds
+2018-02-21T21:00       10      I'm using the builds
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+2018-02-21T19:00       8       I'm using the builds
+2018-02-21T20:00       9       I'm using the builds
+2018-02-21T21:00       10      I'm using the builds
+
+-- !sql --
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+2018-02-21T19:00       8       I'm using the builds
+2018-02-21T20:00       9       I'm using the builds
+2018-02-21T21:00       10      I'm using the builds
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+2018-02-21T19:00       8       I'm using the builds
+2018-02-21T20:00       9       I'm using the builds
+2018-02-21T21:00       10      I'm using the builds
+
+-- !sql --
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+2018-02-21T19:00       8       I'm using the builds
+2018-02-21T20:00       9       I'm using the builds
+2018-02-21T21:00       10      I'm using the builds
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+2018-02-21T19:00       8       I'm using the builds
+2018-02-21T20:00       9       I'm using the builds
+2018-02-21T21:00       10      I'm using the builds
+
+-- !sql --
+
+-- !sql --
+2018-02-21T12:00       1       I'm using the builds
+2018-02-21T13:00       2       I'm using the builds
+2018-02-21T14:00       3       I'm using the builds
+2018-02-21T15:00       4       I'm using the builds
+2018-02-21T16:00       5       I'm using the builds
+2018-02-21T17:00       6       I'm using the builds
+2018-02-21T18:00       7       I'm using the builds
+
diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy
new file mode 100644
index 00000000000..e555d85456c
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy
@@ -0,0 +1,225 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_index_compaction_dup_keys", "p0") {
+    def tableName = "test_index_compaction_dup_keys"
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    boolean disableAutoCompaction = false
+  
+    def set_be_config = { key, value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
+            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
+        }
+    }
+
+    def trigger_full_compaction_on_tablets = { String[][] tablets ->
+        for (String[] tablet : tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            times = 1
+            
+            String compactionStatus;
+            do{
+                def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+                ++times
+                sleep(2000)
+                compactionStatus = parseJson(out.trim()).status.toLowerCase();
+            } while (compactionStatus!="success" && times<=10)
+
+
+            if (compactionStatus == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactionStatus)
+            }
+        }
+    }
+
+    def wait_full_compaction_done = { String[][] tablets ->
+        for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(1000)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+        }
+    }
+
+    def get_rowset_count = {String[][] tablets ->
+        int rowsetCount = 0
+        for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            def compactionStatusUrlIndex = 18
+            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
+            assertEquals(code, 0)
+            def tabletJson = parseJson(out.trim())
+            assert tabletJson.rowsets instanceof List
+            rowsetCount +=((List<String>) tabletJson.rowsets).size()
+        }
+        return rowsetCount
+    }
+
+    def check_config = { String key, String value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+            logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def configList = parseJson(out.trim())
+            assert configList instanceof List
+            for (Object ele in (List) configList) {
+                assert ele instanceof List<String>
+                if (((List<String>) ele)[0] == key) {
+                    assertEquals(value, ((List<String>) ele)[2])
+                }
+            }
+        }
+    }
+
+    boolean invertedIndexCompactionEnable = false
+    boolean has_update_be_config = false
+    try {
+        String backend_id;
+        backend_id = backendId_to_backendIP.keySet()[0]
+        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+        
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "inverted_index_compaction_enable") {
+                invertedIndexCompactionEnable = Boolean.parseBoolean(((List<String>) ele)[2])
+                logger.info("inverted_index_compaction_enable: ${((List<String>) ele)[2]}")
+            }
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+                logger.info("disable_auto_compaction: ${((List<String>) ele)[2]}")
+            }
+        }
+        set_be_config.call("inverted_index_compaction_enable", "true")
+        has_update_be_config = true
+        // check updated config
+        check_config.call("inverted_index_compaction_enable", "true");
+
+        sql """ DROP TABLE IF EXISTS ${tableName}; """
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `hobbies` text NULL,
+                `score` int(11) NULL,
+                index index_name (name) using inverted,
+                index index_hobbies (hobbies) using inverted properties("parser"="english"),
+                index index_score (score) using inverted
+            ) ENGINE=OLAP
+            DUPLICATE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = 
"true");
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "bason", "bason hate 
pear", 99); """
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+
+        int replicaNum = 1
+        // before full compaction, there are 7 rowsets.
+        int rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compactions for all tablets in ${tableName}
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+        // insert more data and trigger full compaction again
+        sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "bason", "bason hate 
pear", 99); """
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compactions for all tablets in ${tableName}
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+    } finally {
+        if (has_update_be_config) {
+            set_be_config.call("inverted_index_compaction_enable", 
invertedIndexCompactionEnable.toString())
+        }
+    }
+}
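
Note on the rowset counts asserted above: each INSERT is a separate load and produces one rowset, and the tablet presumably starts with one initial (empty) rowset, so six INSERTs leave 7 rowsets; a successful full compaction merges everything back into a single rowset. As arithmetic:

    int replicaNum = 1
    int initialRowsets = 1                             // rowset created with the tablet
    int loads = 6                                      // six single-row INSERT statements
    assert initialRowsets + loads == 7 * replicaNum    // matches the pre-compaction assertion
    assert 1 * replicaNum == 1                         // post-compaction: one merged rowset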
diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy
new file mode 100644
index 00000000000..b05059e9172
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy
@@ -0,0 +1,229 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_index_compaction_unique_keys", "p0") {
+    def tableName = "test_index_compaction_unique_keys"
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    boolean disableAutoCompaction = false
+  
+    def set_be_config = { key, value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
+            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
+        }
+    }
+
+    def trigger_full_compaction_on_tablets = { String[][] tablets ->
+        for (String[] tablet : tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            times = 1
+            
+            String compactionStatus;
+            do{
+                def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+                ++times
+                sleep(2000)
+                compactionStatus = parseJson(out.trim()).status.toLowerCase();
+            } while (compactionStatus!="success" && times<=10)
+
+
+            if (compactionStatus == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactionStatus)
+            }
+        }
+    }
+
+    def wait_full_compaction_done = { String[][] tablets ->
+        for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(1000)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+        }
+    }
+
+    def get_rowset_count = {String[][] tablets ->
+        int rowsetCount = 0
+        for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            def compactionStatusUrlIndex = 18
+            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
+            assertEquals(code, 0)
+            def tabletJson = parseJson(out.trim())
+            assert tabletJson.rowsets instanceof List
+            rowsetCount +=((List<String>) tabletJson.rowsets).size()
+        }
+        return rowsetCount
+    }
+
+    def check_config = { String key, String value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+            logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def configList = parseJson(out.trim())
+            assert configList instanceof List
+            for (Object ele in (List) configList) {
+                assert ele instanceof List<String>
+                if (((List<String>) ele)[0] == key) {
+                    assertEquals(value, ((List<String>) ele)[2])
+                }
+            }
+        }
+    }
+
+    boolean invertedIndexCompactionEnable = false
+    boolean has_update_be_config = false
+    try {
+        String backend_id;
+        backend_id = backendId_to_backendIP.keySet()[0]
+        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+        
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "inverted_index_compaction_enable") {
+                invertedIndexCompactionEnable = Boolean.parseBoolean(((List<String>) ele)[2])
+                logger.info("inverted_index_compaction_enable: ${((List<String>) ele)[2]}")
+            }
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+                logger.info("disable_auto_compaction: ${((List<String>) ele)[2]}")
+            }
+        }
+        set_be_config.call("inverted_index_compaction_enable", "true")
+        has_update_be_config = true
+        // check updated config
+        check_config.call("inverted_index_compaction_enable", "true");
+
+        sql """ DROP TABLE IF EXISTS ${tableName}; """
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `hobbies` text NULL,
+                `score` int(11) NULL,
+                index index_name (name) using inverted,
+                index index_hobbies (hobbies) using inverted properties("parser"="english"),
+                index index_score (score) using inverted
+            ) ENGINE=OLAP
+            UNIQUE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( 
+                "replication_num" = "1",
+                "disable_auto_compaction" = "true",
+                "enable_unique_key_merge_on_write" = "true"
+            );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "bason", "bason hate 
pear", 99); """
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
+        String[][] tablets = sql """ show tablets from ${tableName}; """
+
+        int replicaNum = 1
+        // before full compaction, there are 7 rowsets.
+        int rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compactions for all tablets in ${tableName}
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+        // insert more data and trigger full compaction again
+        sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (2, "bason", "bason hate 
pear", 99); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "andy", "andy love apple", 
100); """
+        sql """ INSERT INTO ${tableName} VALUES (3, "bason", "bason hate 
pear", 99); """
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 7 * replicaNum)
+
+        // trigger full compactions for all tablets in ${tableName}
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets);
+        assert (rowsetCount == 1 * replicaNum)
+
+        qt_sql """ select * from ${tableName} order by id, name, hobbies, 
score """
+        qt_sql """ select * from ${tableName} where name match "andy" order by 
id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where hobbies match "pear" order 
by id, name, hobbies, score """
+        qt_sql """ select * from ${tableName} where score < 100 order by id, 
name, hobbies, score """
+
+    } finally {
+        if (has_update_be_config) {
+            set_be_config.call("inverted_index_compaction_enable", 
invertedIndexCompactionEnable.toString())
+        }
+    }
+}
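
Note: both new suites wrap the body in try/finally so the BE setting is restored even if an assertion fails, and has_update_be_config guards against restoring a value that was never changed. The guard pattern in sketch form (read_config is a hypothetical stand-in for the show_be_config parsing above):

    boolean updated = false
    String original = read_config("inverted_index_compaction_enable")  // hypothetical reader
    try {
        set_be_config.call("inverted_index_compaction_enable", "true")
        updated = true
        // ... test body ...
    } finally {
        if (updated) {
            set_be_config.call("inverted_index_compaction_enable", original)
        }
    }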
diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy
index 03ab16af5b5..af92e5c4657 100644
--- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy
+++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy
@@ -19,30 +19,99 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("test_index_compaction_with_multi_index_segments", "p0") {
     def tableName = "test_index_compaction_with_multi_index_segments"
-  
-    def set_be_config = { key, value ->
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    boolean disableAutoCompaction = false
 
+    def set_be_config = { key, value ->
         for (String backend_id: backendId_to_backendIP.keySet()) {
             def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
             logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
         }
     }
 
-    boolean disableAutoCompaction = true
+    def trigger_full_compaction_on_tablets = { String[][] tablets ->
+        for (String[] tablet : tablets) {
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            times = 1
+
+            String compactionStatus;
+            do{
+                def (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+                ++times
+                sleep(2000)
+                compactionStatus = parseJson(out.trim()).status.toLowerCase();
+            } while (compactionStatus!="success" && times<=10)
+
+
+            if (compactionStatus == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactionStatus)
+            }
+        }
+    }
+
+    def wait_full_compaction_done = { String[][] tablets ->
+        for (String[] tablet in tablets) {
+            boolean running = true
+            do {
+                Thread.sleep(1000)
+                String tablet_id = tablet[0]
+                backend_id = tablet[2]
+                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+        }
+    }
+
+    def get_rowset_count = {String[][] tablets ->
+        int rowsetCount = 0
+        for (String[] tablet in tablets) {
+            String tablet_id = tablet[0]
+            def compactionStatusUrlIndex = 18
+            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
+            assertEquals(code, 0)
+            def tabletJson = parseJson(out.trim())
+            assert tabletJson.rowsets instanceof List
+            rowsetCount +=((List<String>) tabletJson.rowsets).size()
+        }
+        return rowsetCount
+    }
+
+    def check_config = { String key, String value ->
+        for (String backend_id: backendId_to_backendIP.keySet()) {
+            def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+            logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def configList = parseJson(out.trim())
+            assert configList instanceof List
+            for (Object ele in (List) configList) {
+                assert ele instanceof List<String>
+                if (((List<String>) ele)[0] == key) {
+                    assertEquals(value, ((List<String>) ele)[2])
+                }
+            }
+        }
+    }
+
     boolean invertedIndexCompactionEnable = false
     int invertedIndexMaxBufferedDocs = -1;
     boolean has_update_be_config = false
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
+        String backend_id = backendId_to_backendIP.keySet()[0]
         def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
         
         logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
@@ -65,6 +134,19 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         set_be_config.call("inverted_index_max_buffered_docs", "5")
         has_update_be_config = true
 
+        // check config
+        check_config.call("inverted_index_compaction_enable", "true")
+        check_config.call("inverted_index_max_buffered_docs", "5")
+
+        /**
+        * test duplicated tables
+        * 1. insert 10 rows
+        * 2. insert another 10 rows
+        * 3. full compaction
+        * 4. insert 10 rows, again
+        * 5. full compaction
+        */
+        tableName = "test_index_compaction_with_multi_index_segments_dups"
         sql """ DROP TABLE IF EXISTS ${tableName}; """
         sql """
             CREATE TABLE ${tableName} (
@@ -114,74 +196,19 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
         String[][] tablets = sql """ show tablets from ${tableName}; """
 
-        def replicaNum = 1
-        logger.info("get table replica num: " + replicaNum)
+        int replicaNum = 1
         // before full compaction, there are 3 rowsets.
-        int rowsetCount = 0
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            def compactionStatusUrlIndex = 18
-            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
-            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
-            assertEquals(code, 0)
-            def tabletJson = parseJson(out.trim())
-            assert tabletJson.rowsets instanceof List
-            rowsetCount +=((List<String>) tabletJson.rowsets).size()
-        }
+        int rowsetCount = get_rowset_count.call(tablets)
         assert (rowsetCount == 3 * replicaNum)
 
         // trigger full compactions for all tablets in ${tableName}
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            backend_id = tablet[2]
-            times = 1
-
-            do{
-                (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
-                ++times
-                sleep(2000)
-            } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10)
-
-            def compactJson = parseJson(out.trim())
-            if (compactJson.status.toLowerCase() == "fail") {
-                assertEquals(disableAutoCompaction, false)
-                logger.info("Compaction was done automatically!")
-            }
-            if (disableAutoCompaction) {
-                assertEquals("success", compactJson.status.toLowerCase())
-            }
-        }
+        trigger_full_compaction_on_tablets.call(tablets)
 
         // wait for full compaction done
-        for (String[] tablet in tablets) {
-            boolean running = true
-            do {
-                Thread.sleep(1000)
-                String tablet_id = tablet[0]
-                backend_id = tablet[2]
-                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
-                assertEquals(code, 0)
-                def compactionStatus = parseJson(out.trim())
-                assertEquals("success", compactionStatus.status.toLowerCase())
-                running = compactionStatus.run_status
-            } while (running)
-        }
+        wait_full_compaction_done.call(tablets)
 
         // after full compaction, there is only 1 rowset.
-        
-        rowsetCount = 0
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            def compactionStatusUrlIndex = 18
-            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
-            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
-            assertEquals(code, 0)
-            def tabletJson = parseJson(out.trim())
-            assert tabletJson.rowsets instanceof List
-            rowsetCount +=((List<String>) tabletJson.rowsets).size()
-        }
+        rowsetCount = get_rowset_count.call(tablets)
         assert (rowsetCount == 1 * replicaNum)
 
         qt_sql """ select * from ${tableName} order by file_time, comment_id, 
body """
@@ -203,73 +230,129 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
 
         tablets = sql """ show tablets from ${tableName}; """
 
-        logger.info("get table replica num: " + replicaNum)
         // before full compaction, there are 2 rowsets.
-        rowsetCount = 0
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            def compactionStatusUrlIndex = 18
-            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
-            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
-            assertEquals(code, 0)
-            def tabletJson = parseJson(out.trim())
-            assert tabletJson.rowsets instanceof List
-            rowsetCount +=((List<String>) tabletJson.rowsets).size()
-        }
+        rowsetCount = get_rowset_count.call(tablets)
         assert (rowsetCount == 2 * replicaNum)
 
         // trigger full compactions for all tablets in ${tableName}
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            backend_id = tablet[2]
-            times = 1
+        trigger_full_compaction_on_tablets.call(tablets)
 
-            do{
-                (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
-                ++times
-                sleep(2000)
-            } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10)
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
 
-            def compactJson = parseJson(out.trim())
-            if (compactJson.status.toLowerCase() == "fail") {
-                assertEquals(disableAutoCompaction, false)
-                logger.info("Compaction was done automatically!")
-            }
-            if (disableAutoCompaction) {
-                assertEquals("success", compactJson.status.toLowerCase())
-            }
-        }
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets)
+        assert (rowsetCount == 1 * replicaNum)
+
+        qt_sql """ select * from ${tableName} order by file_time, comment_id, 
body """
+        qt_sql """ select * from ${tableName} where body match "using" order 
by file_time, comment_id, body """
+        qt_sql """ select * from ${tableName} where body match "the" order by 
file_time, comment_id, body """
+        qt_sql """ select * from ${tableName} where comment_id < 8 order by 
file_time, comment_id, body """
+
+        /**
+        * test unique tables
+        * 1. insert 10 rows
+        * 2. insert another 10 rows
+        * 3. full compaction
+        * 4. insert 10 rows, again
+        * 5. full compaction
+        */
+        tableName = "test_index_compaction_with_multi_index_segments_unique"
+        sql """ DROP TABLE IF EXISTS ${tableName}; """
+        sql """
+            CREATE TABLE ${tableName} (
+                `file_time` DATETIME NOT NULL,
+                `comment_id` int(11)  NULL,
+                `body` TEXT NULL DEFAULT "",
+                INDEX idx_comment_id (`comment_id`) USING INVERTED COMMENT '''',
+                INDEX idx_body (`body`) USING INVERTED PROPERTIES("parser" = "unicode") COMMENT ''''
+            ) ENGINE=OLAP
+            UNIQUE KEY(`file_time`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`file_time`) BUCKETS 1
+            PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "disable_auto_compaction" = "true",
+            "enable_unique_key_merge_on_write" = "true"
+            );
+        """
+
+        // insert 10 rows
+        sql """ INSERT INTO ${tableName} VALUES ("2018-02-21 12:00:00", 1, 
"I\'m using the builds"),
+                                                ("2018-02-21 13:00:00", 2, 
"I\'m using the builds"),
+                                                ("2018-02-21 14:00:00", 3, 
"I\'m using the builds"),
+                                                ("2018-02-21 15:00:00", 4, 
"I\'m using the builds"),
+                                                ("2018-02-21 16:00:00", 5, 
"I\'m using the builds"),
+                                                ("2018-02-21 17:00:00", 6, 
"I\'m using the builds"),
+                                                ("2018-02-21 18:00:00", 7, 
"I\'m using the builds"),
+                                                ("2018-02-21 19:00:00", 8, 
"I\'m using the builds"),
+                                                ("2018-02-21 20:00:00", 9, 
"I\'m using the builds"),
+                                                ("2018-02-21 21:00:00", 10, 
"I\'m using the builds"); """
+        // insert another 10 rows
+        sql """ INSERT INTO ${tableName} VALUES ("2018-02-21 12:00:00", 1, 
"I\'m using the builds"),
+                                                ("2018-02-21 13:00:00", 2, 
"I\'m using the builds"),
+                                                ("2018-02-21 14:00:00", 3, 
"I\'m using the builds"),
+                                                ("2018-02-21 15:00:00", 4, 
"I\'m using the builds"),
+                                                ("2018-02-21 16:00:00", 5, 
"I\'m using the builds"),
+                                                ("2018-02-21 17:00:00", 6, 
"I\'m using the builds"),
+                                                ("2018-02-21 18:00:00", 7, 
"I\'m using the builds"),
+                                                ("2018-02-21 19:00:00", 8, 
"I\'m using the builds"),
+                                                ("2018-02-21 20:00:00", 9, 
"I\'m using the builds"),
+                                                ("2018-02-21 21:00:00", 10, 
"I\'m using the builds"); """
+
+        qt_sql """ select * from ${tableName} order by file_time, comment_id, 
body """
+        qt_sql """ select * from ${tableName} where body match "using" order 
by file_time, comment_id, body """
+        qt_sql """ select * from ${tableName} where body match "the" order by 
file_time, comment_id, body """
+        qt_sql """ select * from ${tableName} where comment_id < 8 order by 
file_time, comment_id, body """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
+        tablets = sql """ show tablets from ${tableName}; """
+
+        // before full compaction, there are 3 rowsets.
+        rowsetCount = get_rowset_count.call(tablets)
+        assert (rowsetCount == 3 * replicaNum)
+
+        // trigger full compactions for all tablets in ${tableName}
+        trigger_full_compaction_on_tablets.call(tablets)
 
         // wait for full compaction done
-        for (String[] tablet in tablets) {
-            boolean running = true
-            do {
-                Thread.sleep(1000)
-                String tablet_id = tablet[0]
-                backend_id = tablet[2]
-                (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
-                assertEquals(code, 0)
-                def compactionStatus = parseJson(out.trim())
-                assertEquals("success", compactionStatus.status.toLowerCase())
-                running = compactionStatus.run_status
-            } while (running)
-        }
+        wait_full_compaction_done.call(tablets)
 
         // after full compaction, there is only 1 rowset.
-        
-        rowsetCount = 0
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            def compactionStatusUrlIndex = 18
-            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
-            logger.info("Show tablets status: code=" + code + ", out=" + out + 
", err=" + err)
-            assertEquals(code, 0)
-            def tabletJson = parseJson(out.trim())
-            assert tabletJson.rowsets instanceof List
-            rowsetCount +=((List<String>) tabletJson.rowsets).size()
-        }
+        rowsetCount = get_rowset_count.call(tablets)
+        assert (rowsetCount == 1 * replicaNum)
+
+        qt_sql """ select * from ${tableName} order by file_time, comment_id, 
body """
+        qt_sql """ select * from ${tableName} where body match "using" order 
by file_time, comment_id, body """
+        qt_sql """ select * from ${tableName} where body match "the" order by 
file_time, comment_id, body """
+        qt_sql """ select * from ${tableName} where comment_id < 8 order by 
file_time, comment_id, body """
+
+        // insert 10 rows, again
+        sql """ INSERT INTO ${tableName} VALUES ("2018-02-21 12:00:00", 1, 
"I\'m using the builds"),
+                                                ("2018-02-21 13:00:00", 2, 
"I\'m using the builds"),
+                                                ("2018-02-21 14:00:00", 3, 
"I\'m using the builds"),
+                                                ("2018-02-21 15:00:00", 4, 
"I\'m using the builds"),
+                                                ("2018-02-21 16:00:00", 5, 
"I\'m using the builds"),
+                                                ("2018-02-21 17:00:00", 6, 
"I\'m using the builds"),
+                                                ("2018-02-21 18:00:00", 7, 
"I\'m using the builds"),
+                                                ("2018-02-21 19:00:00", 8, 
"I\'m using the builds"),
+                                                ("2018-02-21 20:00:00", 9, 
"I\'m using the builds"),
+                                                ("2018-02-21 21:00:00", 10, 
"I\'m using the builds"); """
+
+        tablets = sql """ show tablets from ${tableName}; """
+
+        // before full compaction, there are 2 rowsets.
+        rowsetCount = get_rowset_count.call(tablets)
+        assert (rowsetCount == 2 * replicaNum)
+
+        // trigger full compactions for all tablets in ${tableName}
+        trigger_full_compaction_on_tablets.call(tablets)
+
+        // wait for full compaction done
+        wait_full_compaction_done.call(tablets)
+
+        // after full compaction, there is only 1 rowset.
+        rowsetCount = get_rowset_count.call(tablets)
         assert (rowsetCount == 1 * replicaNum)
 
         qt_sql """ select * from ${tableName} order by file_time, comment_id, 
body """
@@ -283,4 +366,4 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
             set_be_config.call("inverted_index_max_buffered_docs", 
invertedIndexMaxBufferedDocs.toString())
         }
     }
-}
\ No newline at end of file
+}

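Note: the multi-segment suite additionally pins inverted_index_max_buffered_docs to 5, so each 10-row load presumably flushes more than one inverted-index segment per rowset; full compaction then has to merge index data across segments as well as across rowsets, which is the scenario the suite name refers to. The relevant setup calls from the diff:

    set_be_config.call("inverted_index_compaction_enable", "true")
    set_be_config.call("inverted_index_max_buffered_docs", "5")
    check_config.call("inverted_index_max_buffered_docs", "5")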
