This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new f21d7e3833e [test](inverted index)Add cases for inverted index format v2 (#38132)(#38443) (#38222)
f21d7e3833e is described below

commit f21d7e3833e2da62392c7f7610a1957c21405775
Author: qiye <[email protected]>
AuthorDate: Fri Aug 2 12:04:26 2024 +0800

    [test](inverted index)Add cases for inverted index format v2 (#38132)(#38443) (#38222)
    
    ## Proposed changes
    
    backport #38132 #38443
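    
    These cases cover inverted index storage format V2 behavior for create
    table, add/build index, drop column, drop index, rename column, MOR/MOW
    tables, cumulative compaction, single replica compaction, and recovery.
    A `check_nested_index_file` helper is added to
    plugin_curl_requester.groovy, and changing the
    `inverted_index_storage_format` table property is now rejected:
    `ALTER TABLE ... SET ('inverted_index_storage_format' = ...)` fails with
    "Property inverted_index_storage_format is not allowed to change".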
---
 .../analysis/ModifyTablePropertiesClause.java      |   4 +
 .../test_add_build_index_with_format_v2.out        |   6 +
 .../test_create_table_with_format_v2.out           |   6 +
 .../test_cumulative_compaction_with_format_v2.out  |   9 +
 .../test_drop_column_index_with_format_v2.out      |   6 +
 .../test_drop_column_with_format_v2.out            |   6 +
 .../test_drop_index_with_format_v2.out             |   6 +
 .../test_mor_table_with_format_v2.out              |   7 +
 .../test_mow_table_with_format_v2.out              |   7 +
 .../test_rename_column_with_format_v2.out          |  11 +
 ...st_single_replica_compaction_with_format_v2.out |   9 +
 .../plugins/plugin_curl_requester.groovy           |  62 +++--
 .../test_add_build_index_with_format_v2.groovy     | 143 ++++++++++++
 .../test_create_table_with_format_v2.groovy        |  61 +++++
 ...est_cumulative_compaction_with_format_v2.groovy | 254 ++++++++++++++++++++
 .../test_drop_column_with_format_v2.groovy         | 125 ++++++++++
 .../test_drop_index_with_format_v2.groovy          |  96 ++++++++
 .../test_index_change_format.groovy                |  51 ++++
 .../test_mor_table_with_format_v2.groovy           | 259 +++++++++++++++++++++
 .../test_mow_table_with_format_v2.groovy           | 252 ++++++++++++++++++++
 .../test_recover_with_format_v2.groovy             |  84 +++++++
 .../test_rename_column_with_format_v2.groovy       |  94 ++++++++
 ...single_replica_compaction_with_format_v2.groovy | 254 ++++++++++++++++++++
 23 files changed, 1799 insertions(+), 13 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
index f6e7605dcbc..85333a95854 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
@@ -258,6 +258,10 @@ public class ModifyTablePropertiesClause extends AlterTableClause {
             }
             this.needTableStable = false;
             this.opType = AlterOpType.MODIFY_TABLE_PROPERTY_SYNC;
+        } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INVERTED_INDEX_STORAGE_FORMAT)) {
+            throw new AnalysisException(
+                "Property "
+                + PropertyAnalyzer.PROPERTIES_INVERTED_INDEX_STORAGE_FORMAT + " is not allowed to change");
         } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_ENABLE_SINGLE_REPLICA_COMPACTION)) {
             if (!properties.get(PropertyAnalyzer.PROPERTIES_ENABLE_SINGLE_REPLICA_COMPACTION).equalsIgnoreCase("true")
                     && !properties.get(PropertyAnalyzer
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.out
new file mode 100644
index 00000000000..d00ff307840
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.out
@@ -0,0 +1,9 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
+-- !select_default2 --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_index_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_index_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_index_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.out
new file mode 100644
index 00000000000..d5747528271
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.out
@@ -0,0 +1,7 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+4      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Shenzhen        10      1       \N      \N      \N      \N      2020-01-05T00:00        1       34      20
+
+-- !select_default2 --
+4      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Shenzhen        10      1       \N      \N      \N      \N      2020-01-05T00:00        1       34      20
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.out
new file mode 100644
index 00000000000..8958d7f42f6
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.out
@@ -0,0 +1,7 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
+-- !select_default2 --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.out
new file mode 100644
index 00000000000..c1c19e9ab3c
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.out
@@ -0,0 +1,11 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.out
new file mode 100644
index 00000000000..d00ff307840
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.out
@@ -0,0 +1,9 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
+-- !select_default2 --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
diff --git a/regression-test/plugins/plugin_curl_requester.groovy b/regression-test/plugins/plugin_curl_requester.groovy
index 543eb1feb6f..0fc102960cc 100644
--- a/regression-test/plugins/plugin_curl_requester.groovy
+++ b/regression-test/plugins/plugin_curl_requester.groovy
@@ -35,7 +35,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ ->
     if (!url || !(url =~ /^https?:\/\/.+/)) {
         throw new Exception("Invalid url: ${url}")
     }
-    
+
     Integer timeout = 60 // seconds
     Integer maxRetries = 10
     Integer retryCount = 0
@@ -69,16 +69,17 @@ Suite.metaClass.http_client = { String method, String url /* param */ ->
             try {
                 CloseableHttpResponse response = httpClient.execute(request)
                 try {
-                    int statusCode = response.getStatusLine().getStatusCode()
-                    String responseBody = EntityUtils.toString(response.getEntity())
-                    
-                    if (statusCode >= 200 && statusCode < 300) {
+                    code = response.getStatusLine().getStatusCode()
+                    out = EntityUtils.toString(response.getEntity())
+
+                    if (code >= 200 && code < 300) {
                         code = 0 // to be compatible with the old curl function
-                        out = responseBody
                         err = ""
                         return [code, out, err]
+                    } else if (code == 500) {
+                        return [code, out, "Internal Server Error"]
                     } else {
-                        logger.warn("HTTP request failed with status code ${statusCode}, retrying (${++retryCount}/${maxRetries})")
+                        logger.warn("HTTP request failed with status code ${code}, response ${out}, retrying (${++retryCount}/${maxRetries})")
                     }
                 } finally {
                     response.close()
@@ -90,19 +91,15 @@ Suite.metaClass.http_client = { String method, String url /* param */ ->
                 logger.warn("Read timed out, retrying (${++retryCount}/${maxRetries}): ${e.message}")
             } catch (Exception e) {
                 logger.error("Error executing HTTP request: ${e.message}")
-                code = -1
-                out = ""
                 err = e.message
                 return [code, out, err]
             }
 
             sleep(sleepTime)
-            sleepTime = Math.min(sleepTime * 2, 60000) 
+            sleepTime = Math.min(sleepTime * 2, 60000)
         }
 
         logger.error("HTTP request failed after ${maxRetries} attempts")
-        code = -1
-        out = ""
         err = "Failed after ${maxRetries} attempts"
         return [code, out, err]
     } finally {
@@ -112,7 +109,7 @@ Suite.metaClass.http_client = { String method, String url /* param */ ->
 
 logger.info("Added 'http_client' function to Suite")
 
-Suite.metaClass.curl = { String method, String url /* param */-> 
+Suite.metaClass.curl = { String method, String url /* param */->
     Suite suite = delegate as Suite
     if (method != "GET" && method != "POST")
     {
@@ -255,3 +252,42 @@ Suite.metaClass.be_report_task = { String ip, int port ->
 }
 
 logger.info("Added 'be_report_task' function to Suite")
+
+// check nested index file api
+Suite.metaClass.check_nested_index_file = { ip, port, tablet_id, expected_rowsets_count, expected_indices_count, format ->
+    def (code, out, err) = http_client("GET", String.format("http://%s:%s/api/show_nested_index_file?tablet_id=%s", ip, port, tablet_id))
+    logger.info("Run show_nested_index_file_on_tablet: code=" + code + ", out=" + out + ", err=" + err)
+    if (code == 500) {
+        assertEquals("E-6003", parseJson(out.trim()).status)
+        assertTrue(parseJson(out.trim()).msg.contains("not found"))
+        return
+    }
+    assertTrue(code == 0)
+    assertEquals(tablet_id, parseJson(out.trim()).tablet_id.toString())
+    def rowsets_count = parseJson(out.trim()).rowsets.size();
+    assertEquals(expected_rowsets_count, rowsets_count)
+    def index_files_count = 0
+    def segment_files_count = 0
+    for (def rowset in parseJson(out.trim()).rowsets) {
+        assertEquals(format, rowset.index_storage_format)
+        for (int i = 0; i < rowset.segments.size(); i++) {
+            def segment = rowset.segments[i]
+            assertEquals(i, segment.segment_id)
+            def indices_count = segment.indices.size()
+            assertEquals(expected_indices_count, indices_count)
+            if (format == "V1") {
+                index_files_count += indices_count
+            } else {
+                index_files_count++
+            }
+        }
+        segment_files_count += rowset.segments.size()
+    }
+    if (format == "V1") {
+        assertEquals(index_files_count, segment_files_count * expected_indices_count)
+    } else {
+        assertEquals(index_files_count, segment_files_count)
+    }
+}
+
+logger.info("Added 'check_nested_index_file' function to Suite")
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.groovy
new file mode 100644
index 00000000000..dabb3534e32
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.groovy
@@ -0,0 +1,143 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_add_build_index_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_add_build_index_with_format_v2"
+
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait change table state to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def wait_for_build_index_on_partition_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW BUILD INDEX WHERE TableName = "${table_name}";"""
+            def expected_finished_num = alter_res.size();
+            def finished_num = 0;
+            for (int i = 0; i < expected_finished_num; i++) {
+                logger.info(table_name + " build index job state: " + alter_res[i][7] + i)
+                if (alter_res[i][7] == "FINISHED") {
+                    ++finished_num;
+                }
+            }
+            if (finished_num == expected_finished_num) {
+                logger.info(table_name + " all build index jobs finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_build_index_on_partition_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    // add index
+    sql """
+        ALTER TABLE ${tableName}
+        ADD INDEX idx_name (name) using inverted;
+    """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+    
+    sql """
+        ALTER TABLE ${tableName}
+        ADD INDEX idx_score (score) using inverted;
+    """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    // show index after add index
+    def show_result = sql_return_maparray "show index from ${tableName}"
+    logger.info("show index from " + tableName + " result: " + show_result)
+    assertEquals(show_result[0].Key_name, "idx_name")
+    assertEquals(show_result[1].Key_name, "idx_score")
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+
+    // cloud mode is directly schema change, local mode is light schema change.
+    // cloud mode is 12, local mode is 6
+    if (isCloudMode()) {
+        check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+        qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, name, score;"
+        return
+    } else {
+        check_nested_index_file(ip, port, tablet_id, 7, 0, "V2")
+    }
+
+    // build index 
+    sql """
+        BUILD INDEX idx_name ON ${tableName};
+    """
+    wait_for_build_index_on_partition_finish(tableName, timeout)
+
+    check_nested_index_file(ip, port, tablet_id, 7, 1, "V2")
+
+    // build index 
+    sql """
+        BUILD INDEX idx_score ON ${tableName};
+    """
+    wait_for_build_index_on_partition_finish(tableName, timeout)
+
+    check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, name, score;"
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.groovy
new file mode 100644
index 00000000000..cf31e2ca80f
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.groovy
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_create_table_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_create_table_with_format_v2"
+
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy
new file mode 100644
index 00000000000..c1f9ac2d7ce
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy
@@ -0,0 +1,254 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_cumulative_compaction_with_format_v2", "inverted_index_format_v2") 
{
+    def tableName = "test_cumulative_compaction_with_format_v2"
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait change table state to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        //BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://";)
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data load date",
+                `datev2` DATEV2 NOT NULL COMMENT "data load date",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data load datetime",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data load datetime",
+                `city` VARCHAR(20) COMMENT "user city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "total user spend",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "max dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "min dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            DUPLICATE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "inverted_index_storage_format" = "V2", "disable_auto_compaction" = "true" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + 
segment_count)
+            check_nested_index_file(ip, port, tablet_id, 9, 3, "V2")
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://";)
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + 
segment_count)
+            check_nested_index_file(ip, port, tablet_id, 2, 3, "V2")
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 testing environment, there are no expected operations such as scaling down BE (backend) services.
+        // If tablets or dedup_tablets is empty, an exception is thrown and the case fails.
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        if (replicaNum != 1 && replicaNum != 3)
+        {
+            assert(false);
+        }
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+    } finally {
+    }
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.groovy
new file mode 100644
index 00000000000..1a75137e1e4
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.groovy
@@ -0,0 +1,125 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_drop_column_with_format_v2", "inverted_index_format_v2"){
+    // cloud mode is light schema change, tablet meta will not be updated after alter table
+    // so we can't get the latest tablet meta
+    if (isCloudMode()) {
+        return
+    }
+    def tableName = "test_drop_column_with_format_v2"
+
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait change table state to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def wait_for_build_index_on_partition_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW BUILD INDEX WHERE TableName = "${table_name}";"""
+            def expected_finished_num = alter_res.size();
+            def finished_num = 0;
+            for (int i = 0; i < expected_finished_num; i++) {
+                logger.info(table_name + " build index job state: " + alter_res[i][7] + i)
+                if (alter_res[i][7] == "FINISHED") {
+                    ++finished_num;
+                }
+            }
+            if (finished_num == expected_finished_num) {
+                logger.info(table_name + " all build index jobs finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_build_index_on_partition_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true",
+            "light_schema_change" = "false"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+
+    // drop column
+    sql """ ALTER TABLE ${tableName} DROP COLUMN score; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    tablet_id = tablets[0].TabletId
+    // when drop column, the index files will not be deleted, so the index files count is still 2
+    check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+
+    sql """ ALTER TABLE ${tableName} DROP COLUMN name; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    tablet_id = tablets[0].TabletId
+    // when drop column, the index files will not be deleted, so the index files count is still 2
+    // when all index columns are dropped, the index files will be deleted by GC later
+    check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.groovy
new file mode 100644
index 00000000000..12877099f38
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.groovy
@@ -0,0 +1,96 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_drop_index_with_format_v2", "inverted_index_format_v2"){
+    // cloud mode is light schema change, tablet meta will not be updated after alter table
+    // so we can't get the latest tablet meta
+    if (isCloudMode()) {
+        return
+    }
+    def tableName = "test_drop_index_with_format_v2"
+
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait change table state to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    check_nested_index_file(ip, port, tablet_id, 7, 2, "V2")
+
+    // drop index 
+    sql """ DROP INDEX index_name on ${tableName}; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+    check_nested_index_file(ip, port, tablet_id, 7, 1, "V2")
+
+    // drop index 
+    sql """ DROP INDEX index_score on ${tableName}; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+    check_nested_index_file(ip, port, tablet_id, 7, 0, "V2")
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_index_change_format.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_index_change_format.groovy
new file mode 100644
index 00000000000..8a5179b0010
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_index_change_format.groovy
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_index_change_format", "inverted_index_format_v2"){
+    def createAndAlterTable = { tableName, initialFormat, newFormat -> 
+        sql "DROP TABLE IF EXISTS ${tableName}"
+        
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `score` int(11) NULL,
+                index index_name (name) using inverted,
+                index index_score (score) using inverted
+            ) ENGINE=OLAP
+            DUPLICATE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1",
+                "inverted_index_storage_format" = "${initialFormat}",
+                "disable_auto_compaction" = "true"
+            );
+        """
+        try {
+            sql "ALTER TABLE ${tableName} SET ('inverted_index_storage_format' 
= '${newFormat}')"
+        } catch (Exception e) {
+            log.info(e.getMessage())
+            assertTrue(e.getMessage().contains('Property inverted_index_storage_format is not allowed to change'))
+        }
+    }
+
+    createAndAlterTable("test_index_change_format_v1", "V1", "V2")
+    createAndAlterTable("test_index_change_format_v2", "V2", "V1")
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy
new file mode 100644
index 00000000000..639f73349cd
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy
@@ -0,0 +1,259 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_mor_table_with_format_v2", "inverted_index_format_v2") {
+    def tableName = "test_mor_table_with_format_v2"
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait change table state to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        
//BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, 
backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data import date and time",
+                `datev2` DATEV2 NOT NULL COMMENT "data import date and time",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data import date and time",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data import date and time",
+                `city` VARCHAR(20) COMMENT "user's city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "user's last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user's total spend",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user's maximum dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user's minimum dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, 
`datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = 
"true", "inverted_index_storage_format" = "V2", 
"enable_unique_key_merge_on_write" = "false" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 
30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 
31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', 
'2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 
31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', 
'2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 
32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', 
'2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 
32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', 
'2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 
33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, 
'2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, 
'2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        sql """ DELETE FROM ${tableName} WHERE user_id = 3 """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 
'Shenzhen' ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time;
 """
+
+        
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; 
"""
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + 
segment_count)
+            check_nested_index_file(ip, port, tablet_id, 10, 3, "V2")
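+            // expected 10 rowsets: the initial version + 8 INSERTs + 1
+            // DELETE predicate rowset; 3 matches the table's three
+            // inverted indexes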
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=full")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            // after compaction, there is 1 rowset in local mode and 2 rowsets in cloud mode.
+            if (isCloudMode()) {
+                check_nested_index_file(ip, port, tablet_id, 2, 3, "V2")
+            } else {
+                check_nested_index_file(ip, port, tablet_id, 1, 3, "V2")
+            }
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 test environment there is no expected operation, such as scaling down BE (backend) services, that would change the replica count
+        // if tablets or dedup_tablets is empty, the division below throws an exception and the case fails
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        assertTrue(replicaNum == 1 || replicaNum == 3, "unexpected replica number: " + replicaNum)
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 
'Shenzhen' ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time;
 """
+    } finally {
+    }
+}
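
The compaction trigger above rebuilds the same curl invocation in every suite;
a minimal consolidation sketch, assuming the suite-level parseJson helper used
above (the helper name runCompaction is illustrative, not part of the plugin):

    // Hypothetical helper mirroring the POST /api/compaction/run calls above.
    def runCompaction = { String ip, String port, String tabletId, String type ->
        def command = "curl -X POST http://${ip}:${port}/api/compaction/run?tablet_id=${tabletId}&compact_type=${type}"
        def process = command.execute()
        assert process.waitFor() == 0
        return parseJson(process.getText())
    }

    // usage, e.g.: def compactJson = runCompaction(ip, port, tablet_id, "full")
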
diff --git 
a/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy
 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy
new file mode 100644
index 00000000000..fcb1a15f3d2
--- /dev/null
+++ 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy
@@ -0,0 +1,252 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_mow_table_with_format_v2", "inverted_index_format_v2") {
+    def tableName = "test_mow_table_with_format_v2"
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = 
"${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait for the table state to change back to normal
+                logger.info(table_name + " latest alter job finished, detail: 
" + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        // useTime reaches OpTimeout only when the job never finished
+        assertTrue(useTime < OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", 
err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        
//BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, 
backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data import date and time",
+                `datev2` DATEV2 NOT NULL COMMENT "data import date and time",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data import date and time",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data import date and time",
+                `city` VARCHAR(20) COMMENT "user's city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "user's last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user's total spend",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user's maximum dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user's minimum dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, 
`datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = 
"true", "inverted_index_storage_format" = "V2", 
"enable_unique_key_merge_on_write" = "true" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 
30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 
31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', 
'2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 
31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', 
'2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 
32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', 
'2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 
32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', 
'2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 
33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, 
'2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, 
'2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 
'Beijing' ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time;
 """
+
+        
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; 
"""
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + 
segment_count)
+            check_nested_index_file(ip, port, tablet_id, 9, 3, "V2")
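+            // expected 9 rowsets: the initial version + the 8 INSERTs (no
+            // DELETE in this MOW case); 3 matches the three inverted indexes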
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            check_nested_index_file(ip, port, tablet_id, 2, 3, "V2")
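+            // cumulative compaction merges the 8 insert rowsets into one,
+            // leaving the initial version plus the compacted rowset (2)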
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 test environment there is no expected operation, such as scaling down BE (backend) services, that would change the replica count
+        // if tablets or dedup_tablets is empty, the division below throws an exception and the case fails
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        assertTrue(replicaNum == 1 || replicaNum == 3, "unexpected replica number: " + replicaNum)
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 
'Beijing' ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time;
 """
+    } finally {
+    }
+}
diff --git 
a/regression-test/suites/inverted_index_p0/index_format_v2/test_recover_with_format_v2.groovy
 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_recover_with_format_v2.groovy
new file mode 100644
index 00000000000..8de145d7921
--- /dev/null
+++ 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_recover_with_format_v2.groovy
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_recover_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_recover_with_format_v2"
+
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def check_index_file = { ->
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; 
"""
+        for (int i = 0; i < tablets.size(); i++) {
+            String tablet_id = tablets[i].TabletId
+            String backend_id = tablets[i].BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            check_nested_index_file(ip, port, tablet_id, 3, 2, "V2")
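+            // 3 rowsets per tablet: the initial version + the 2 INSERTs that
+            // land in each single-bucket partition; 2 matches the two
+            // inverted indexes (index_name, index_score)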
+        }
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        PARTITION BY LIST(`id`)
+        (
+            PARTITION p1 VALUES IN ("1"),
+            PARTITION p2 VALUES IN ("2"),
+            PARTITION p3 VALUES IN ("3")
+        )
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    // select to sync tablet meta in cloud mode
+    sql """ SELECT * FROM ${tableName}; """
+
+    check_index_file()
+
+    // drop table and recover
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    sql "RECOVER TABLE ${tableName}"
+    check_index_file()
+
+    // drop partition and recover
+    sql "ALTER TABLE ${tableName} DROP PARTITION p1"
+    sql "RECOVER PARTITION p1 from ${tableName}"
+    check_index_file()
+}
diff --git 
a/regression-test/suites/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.groovy
 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.groovy
new file mode 100644
index 00000000000..38b03510a0c
--- /dev/null
+++ 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.groovy
@@ -0,0 +1,94 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_rename_column_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_rename_column_with_format_v2"
+
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = 
"${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait for the table state to change back to normal
+                logger.info(table_name + " latest alter job finished, detail: 
" + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        // useTime reaches OpTimeout only when the job never finished
+        assertTrue(useTime < OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, 
name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    check_nested_index_file(ip, port, tablet_id, 7, 1, "V2")
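+    // 7 rowsets: the initial version + the 6 INSERTs; 1 matches the single
+    // inverted index (index_name)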
+
+    // rename column
+    sql """ ALTER TABLE ${tableName} RENAME COLUMN name name_new; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    qt_sql "SELECT * FROM $tableName WHERE name_new match 'andy' order by id, 
name_new, score;"
+
+    check_nested_index_file(ip, port, tablet_id, 7, 1, "V2")
+
+    // drop column
+    sql """ ALTER TABLE ${tableName} DROP COLUMN name_new; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+    // when a column is dropped, its index files are not deleted immediately, so the index file count is still 1
+    // once all indexed columns are dropped, the index files will be deleted by GC later
+    check_nested_index_file(ip, port, tablet_id, 7, 1, "V2")
+}
diff --git 
a/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy
 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy
new file mode 100644
index 00000000000..fce14b8796b
--- /dev/null
+++ 
b/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy
@@ -0,0 +1,254 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_single_replica_compaction_with_format_v2", 
"inverted_index_format_v2") {
+    def tableName = "test_single_replica_compaction_with_format_v2"
+
+    def backends = sql_return_maparray('show backends')
+    // if there are fewer than 2 backends, skip this case
+    if (backends.size() < 2) {
+        return
+    }
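+    // the table below is created with "replication_num" = "2" and
+    // "enable_single_replica_compaction" = "true", so at least two alive
+    // backends are required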
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = 
"${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait for the table state to change back to normal
+                logger.info(table_name + " latest alter job finished, detail: 
" + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        // useTime reaches OpTimeout only when the job never finished
+        assertTrue(useTime < OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", 
err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        
//BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, 
backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) 
ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data import date and time",
+                `datev2` DATEV2 NOT NULL COMMENT "data import date and time",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data import date and time",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data import date and time",
+                `city` VARCHAR(20) COMMENT "user's city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "user's last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "user's last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user's last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user's total spend",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user's maximum dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user's minimum dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            DUPLICATE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, 
`datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "2", "disable_auto_compaction" = 
"true", "inverted_index_storage_format" = "V2", 
"enable_single_replica_compaction" = "true" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', 
'2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 
30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', 
'2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 
31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', 
'2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 
31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', 
'2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 
32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', 
'2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 
32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', 
'2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 
33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, 
'2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', 
'2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, 
'2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 
'Beijing' ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time;
 """
+
+        
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; 
"""
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            check_nested_index_file(ip, port, tablet_id, 9, 3, "V2")
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new 
InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            check_nested_index_file(ip, port, tablet_id, 2, 3, "V2")
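+            // with single replica compaction, one replica compacts and its
+            // peer is expected to fetch the compacted rowset, so both
+            // replicas should converge to 2 rowsets (initial + compacted)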
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 test environment there is no expected operation, such as scaling down BE (backend) services, that would change the replica count
+        // if tablets or dedup_tablets is empty, the division below throws an exception and the case fails
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        assertTrue(replicaNum >= 1, "unexpected replica number: " + replicaNum)
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 
'Beijing' ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time;
 """
+    } finally {
+    }
+}
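
Each suite also repeats the run_status polling loop verbatim; a compact
equivalent under the same assumptions (the helper name waitCompactionDone is
illustrative, not part of the plugin):

    // Hypothetical helper mirroring the GET /api/compaction/run_status
    // polling loops above.
    def waitCompactionDone = { String ip, String port, String tabletId ->
        def running = true
        while (running) {
            Thread.sleep(1000)
            def process = "curl -X GET http://${ip}:${port}/api/compaction/run_status?tablet_id=${tabletId}".execute()
            assert process.waitFor() == 0
            def status = parseJson(process.getText())
            assertEquals("success", status.status.toLowerCase())
            running = status.run_status
        }
    }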

