This is an automated email from the ASF dual-hosted git repository.

morrysnow pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new ed3fc95b312 branch-3.1: [feature](struct) support struct schema-change 
behavior for add sub-columns #47096 (#52224)
ed3fc95b312 is described below

commit ed3fc95b31236e8e5e2fcf19d6418e0f819d90be
Author: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Tue Jun 24 23:10:02 2025 +0800

    branch-3.1: [feature](struct) support struct schema-change behavior for add 
sub-columns #47096 (#52224)
    
    Cherry-picked from #47096
    
    Co-authored-by: amory <[email protected]>
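    
    For illustration, the behavior added here lets an existing STRUCT column
    gain new trailing sub-columns through a schema change; rows written before
    the change read the added sub-columns through a default iterator. A minimal
    sketch, using a hypothetical table t (the statements mirror those in the
    regression test added below):
    
        CREATE TABLE t (k INT, c_s STRUCT<col:VARCHAR(10)>)
        DUPLICATE KEY(k)
        DISTRIBUTED BY HASH(k) BUCKETS 1
        PROPERTIES ('replication_num' = '1');
    
        -- allowed: append sub-columns col1 and col2 to the struct
        ALTER TABLE t MODIFY COLUMN c_s
            STRUCT<col:VARCHAR(10), col1:INT, col2:DECIMAL(10,2)> NULL;
    
        -- rejected: dropping, renaming, or retyping existing sub-columns
        ALTER TABLE t MODIFY COLUMN c_s STRUCT<col:VARCHAR(10)> NULL;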
---
 be/src/olap/rowset/segment_v2/column_reader.cpp    |  55 ++-
 be/src/olap/rowset/segment_v2/column_reader.h      |   8 +-
 .../rowset/segment_v2/hierarchical_data_reader.cpp |   4 +-
 be/src/olap/rowset/segment_v2/segment.cpp          |  18 +-
 be/src/olap/rowset/segment_v2/segment.h            |   3 +
 .../java/org/apache/doris/catalog/ColumnType.java  |  55 ++-
 .../doris/alter/SchemaChangeHandlerTest.java       | 225 ++++++++++-
 .../data/schema_change_p0/test_modify_struct.out   | Bin 0 -> 95375 bytes
 .../schema_change_p0/test_modify_struct.groovy     | 426 +++++++++++++++++++++
 9 files changed, 757 insertions(+), 37 deletions(-)

diff --git a/be/src/olap/rowset/segment_v2/column_reader.cpp 
b/be/src/olap/rowset/segment_v2/column_reader.cpp
index 82db0dd4aef..99ab5829c79 100644
--- a/be/src/olap/rowset/segment_v2/column_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/column_reader.cpp
@@ -171,6 +171,7 @@ Status ColumnReader::create_struct(const 
ColumnReaderOptions& opts, const Column
     std::unique_ptr<ColumnReader> struct_reader(
             new ColumnReader(opts, meta, num_rows, file_reader));
     struct_reader->_sub_readers.reserve(meta.children_columns_size());
+    // struct columns now support adding child columns via schema change
     for (size_t i = 0; i < meta.children_columns_size(); i++) {
         std::unique_ptr<ColumnReader> sub_reader;
         RETURN_IF_ERROR(ColumnReader::create(opts, meta.children_columns(i),
@@ -701,7 +702,7 @@ Status ColumnReader::seek_at_or_before(ordinal_t ordinal, 
OrdinalPageIndexIterat
     return Status::OK();
 }
 
-Status ColumnReader::new_iterator(ColumnIterator** iterator) {
+Status ColumnReader::new_iterator(ColumnIterator** iterator, const 
TabletColumn* tablet_column) {
     if (is_empty()) {
         *iterator = new EmptyFileColumnIterator();
         return Status::OK();
@@ -716,13 +717,13 @@ Status ColumnReader::new_iterator(ColumnIterator** 
iterator) {
             return new_agg_state_iterator(iterator);
         }
         case FieldType::OLAP_FIELD_TYPE_STRUCT: {
-            return new_struct_iterator(iterator);
+            return new_struct_iterator(iterator, tablet_column);
         }
         case FieldType::OLAP_FIELD_TYPE_ARRAY: {
-            return new_array_iterator(iterator);
+            return new_array_iterator(iterator, tablet_column);
         }
         case FieldType::OLAP_FIELD_TYPE_MAP: {
-            return new_map_iterator(iterator);
+            return new_map_iterator(iterator, tablet_column);
         }
         case FieldType::OLAP_FIELD_TYPE_VARIANT: {
             *iterator = new VariantRootColumnIterator(new 
FileColumnIterator(this));
@@ -740,55 +741,77 @@ Status 
ColumnReader::new_agg_state_iterator(ColumnIterator** iterator) {
     return Status::OK();
 }
 
-Status ColumnReader::new_array_iterator(ColumnIterator** iterator) {
+Status ColumnReader::new_array_iterator(ColumnIterator** iterator,
+                                        const TabletColumn* tablet_column) {
     ColumnIterator* item_iterator = nullptr;
-    RETURN_IF_ERROR(_sub_readers[0]->new_iterator(&item_iterator));
+    RETURN_IF_ERROR(_sub_readers[0]->new_iterator(
+            &item_iterator, tablet_column && 
tablet_column->get_subtype_count() > 0
+                                    ? &tablet_column->get_sub_column(0)
+                                    : nullptr));
 
     ColumnIterator* offset_iterator = nullptr;
-    RETURN_IF_ERROR(_sub_readers[1]->new_iterator(&offset_iterator));
+    RETURN_IF_ERROR(_sub_readers[1]->new_iterator(&offset_iterator, nullptr));
     auto* ofcIter =
             new 
OffsetFileColumnIterator(reinterpret_cast<FileColumnIterator*>(offset_iterator));
 
     ColumnIterator* null_iterator = nullptr;
     if (is_nullable()) {
-        RETURN_IF_ERROR(_sub_readers[2]->new_iterator(&null_iterator));
+        RETURN_IF_ERROR(_sub_readers[2]->new_iterator(&null_iterator, 
nullptr));
     }
     *iterator = new ArrayFileColumnIterator(this, ofcIter, item_iterator, 
null_iterator);
     return Status::OK();
 }
 
-Status ColumnReader::new_map_iterator(ColumnIterator** iterator) {
+Status ColumnReader::new_map_iterator(ColumnIterator** iterator,
+                                      const TabletColumn* tablet_column) {
     ColumnIterator* key_iterator = nullptr;
-    RETURN_IF_ERROR(_sub_readers[0]->new_iterator(&key_iterator));
+    RETURN_IF_ERROR(_sub_readers[0]->new_iterator(
+            &key_iterator, tablet_column && tablet_column->get_subtype_count() 
> 1
+                                   ? &tablet_column->get_sub_column(0)
+                                   : nullptr));
     ColumnIterator* val_iterator = nullptr;
-    RETURN_IF_ERROR(_sub_readers[1]->new_iterator(&val_iterator));
+    RETURN_IF_ERROR(_sub_readers[1]->new_iterator(
+            &val_iterator, tablet_column && tablet_column->get_subtype_count() 
> 1
+                                   ? &tablet_column->get_sub_column(1)
+                                   : nullptr));
     ColumnIterator* offsets_iterator = nullptr;
-    RETURN_IF_ERROR(_sub_readers[2]->new_iterator(&offsets_iterator));
+    RETURN_IF_ERROR(_sub_readers[2]->new_iterator(&offsets_iterator, nullptr));
     auto* ofcIter =
             new 
OffsetFileColumnIterator(reinterpret_cast<FileColumnIterator*>(offsets_iterator));
 
     ColumnIterator* null_iterator = nullptr;
     if (is_nullable()) {
-        RETURN_IF_ERROR(_sub_readers[3]->new_iterator(&null_iterator));
+        RETURN_IF_ERROR(_sub_readers[3]->new_iterator(&null_iterator, 
nullptr));
     }
     *iterator = new MapFileColumnIterator(this, null_iterator, ofcIter, 
key_iterator, val_iterator);
     return Status::OK();
 }
 
-Status ColumnReader::new_struct_iterator(ColumnIterator** iterator) {
+Status ColumnReader::new_struct_iterator(ColumnIterator** iterator,
+                                         const TabletColumn* tablet_column) {
     std::vector<ColumnIterator*> sub_column_iterators;
     size_t child_size = is_nullable() ? _sub_readers.size() - 1 : 
_sub_readers.size();
+    size_t tablet_column_size = tablet_column ? 
tablet_column->get_sub_columns().size() : 0;
     sub_column_iterators.reserve(child_size);
 
     ColumnIterator* sub_column_iterator;
     for (size_t i = 0; i < child_size; i++) {
-        RETURN_IF_ERROR(_sub_readers[i]->new_iterator(&sub_column_iterator));
+        RETURN_IF_ERROR(_sub_readers[i]->new_iterator(
+                &sub_column_iterator, tablet_column ? 
&tablet_column->get_sub_column(i) : nullptr));
         sub_column_iterators.push_back(sub_column_iterator);
     }
 
+    // create default iterators for sub-columns added by schema change
+    for (size_t i = child_size; i < tablet_column_size; i++) {
+        TabletColumn column = tablet_column->get_sub_column(i);
+        std::unique_ptr<ColumnIterator>* it = new 
std::unique_ptr<ColumnIterator>();
+        RETURN_IF_ERROR(Segment::new_default_iterator(column, it));
+        sub_column_iterators.push_back(it->get());
+    }
+
     ColumnIterator* null_iterator = nullptr;
     if (is_nullable()) {
-        
RETURN_IF_ERROR(_sub_readers[child_size]->new_iterator(&null_iterator));
+        RETURN_IF_ERROR(_sub_readers[child_size]->new_iterator(&null_iterator, 
nullptr));
     }
     *iterator = new StructFileColumnIterator(this, null_iterator, 
sub_column_iterators);
     return Status::OK();
diff --git a/be/src/olap/rowset/segment_v2/column_reader.h 
b/be/src/olap/rowset/segment_v2/column_reader.h
index 97fef8435ec..24a54f180f4 100644
--- a/be/src/olap/rowset/segment_v2/column_reader.h
+++ b/be/src/olap/rowset/segment_v2/column_reader.h
@@ -135,10 +135,10 @@ public:
     virtual ~ColumnReader();
 
     // create a new column iterator. Client should delete returned iterator
-    Status new_iterator(ColumnIterator** iterator);
-    Status new_array_iterator(ColumnIterator** iterator);
-    Status new_struct_iterator(ColumnIterator** iterator);
-    Status new_map_iterator(ColumnIterator** iterator);
+    Status new_iterator(ColumnIterator** iterator, const TabletColumn* 
tablet_column);
+    Status new_array_iterator(ColumnIterator** iterator, const TabletColumn* 
tablet_column);
+    Status new_struct_iterator(ColumnIterator** iterator, const TabletColumn* 
tablet_column);
+    Status new_map_iterator(ColumnIterator** iterator, const TabletColumn* 
tablet_column);
     Status new_agg_state_iterator(ColumnIterator** iterator);
     // Client should delete returned iterator
     Status new_bitmap_index_iterator(BitmapIndexIterator** iterator);
diff --git a/be/src/olap/rowset/segment_v2/hierarchical_data_reader.cpp 
b/be/src/olap/rowset/segment_v2/hierarchical_data_reader.cpp
index db6bac6b8b4..a63c169c369 100644
--- a/be/src/olap/rowset/segment_v2/hierarchical_data_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/hierarchical_data_reader.cpp
@@ -56,7 +56,7 @@ Status 
HierarchicalDataReader::create(std::unique_ptr<ColumnIterator>* reader,
     // like {"a" : "b" : {"e" : 1.1}} in jsonb format
     if (read_type == ReadType::MERGE_SPARSE) {
         ColumnIterator* it;
-        RETURN_IF_ERROR(root->data.reader->new_iterator(&it));
+        RETURN_IF_ERROR(root->data.reader->new_iterator(&it, nullptr));
         stream_iter->set_root(std::make_unique<SubstreamIterator>(
                 root->data.file_column_type->create_column(), 
std::unique_ptr<ColumnIterator>(it),
                 root->data.file_column_type));
@@ -133,7 +133,7 @@ Status HierarchicalDataReader::add_stream(const 
SubcolumnColumnReaders::Node* no
     }
     CHECK(node);
     ColumnIterator* it;
-    RETURN_IF_ERROR(node->data.reader->new_iterator(&it));
+    RETURN_IF_ERROR(node->data.reader->new_iterator(&it, nullptr));
     std::unique_ptr<ColumnIterator> it_ptr;
     it_ptr.reset(it);
     SubstreamIterator reader(node->data.file_column_type->create_column(), 
std::move(it_ptr),
diff --git a/be/src/olap/rowset/segment_v2/segment.cpp 
b/be/src/olap/rowset/segment_v2/segment.cpp
index 86884d7adce..5bf099f5fc3 100644
--- a/be/src/olap/rowset/segment_v2/segment.cpp
+++ b/be/src/olap/rowset/segment_v2/segment.cpp
@@ -699,8 +699,8 @@ Status Segment::_create_column_readers(const 
SegmentFooterPB& footer) {
     return Status::OK();
 }
 
-static Status new_default_iterator(const TabletColumn& tablet_column,
-                                   std::unique_ptr<ColumnIterator>* iter) {
+Status Segment::new_default_iterator(const TabletColumn& tablet_column,
+                                     std::unique_ptr<ColumnIterator>* iter) {
     if (!tablet_column.has_default_value() && !tablet_column.is_nullable()) {
         return Status::InternalError(
                 "invalid nonexistent column without default value. 
column_uid={}, column_name={}, "
@@ -724,7 +724,7 @@ Status Segment::_new_iterator_with_variant_root(const 
TabletColumn& tablet_colum
                                                 const 
SubcolumnColumnReaders::Node* root,
                                                 vectorized::DataTypePtr 
target_type_hint) {
     ColumnIterator* it;
-    RETURN_IF_ERROR(root->data.reader->new_iterator(&it));
+    RETURN_IF_ERROR(root->data.reader->new_iterator(&it, &tablet_column));
     auto* stream_iter = new ExtractReader(
             tablet_column,
             
std::make_unique<SubstreamIterator>(root->data.file_column_type->create_column(),
@@ -791,7 +791,7 @@ Status Segment::new_column_iterator_with_path(const 
TabletColumn& tablet_column,
             assert(leaf);
             std::unique_ptr<ColumnIterator> sibling_iter;
             ColumnIterator* sibling_iter_ptr;
-            
RETURN_IF_ERROR(leaf->data.reader->new_iterator(&sibling_iter_ptr));
+            RETURN_IF_ERROR(leaf->data.reader->new_iterator(&sibling_iter_ptr, 
&tablet_column));
             sibling_iter.reset(sibling_iter_ptr);
             *iter = 
std::make_unique<DefaultNestedColumnIterator>(std::move(sibling_iter),
                                                                   
leaf->data.file_column_type);
@@ -822,7 +822,7 @@ Status Segment::new_column_iterator_with_path(const 
TabletColumn& tablet_column,
             return Status::OK();
         }
         ColumnIterator* it;
-        RETURN_IF_ERROR(node->data.reader->new_iterator(&it));
+        RETURN_IF_ERROR(node->data.reader->new_iterator(&it, &tablet_column));
         iter->reset(it);
         return Status::OK();
     }
@@ -833,7 +833,7 @@ Status Segment::new_column_iterator_with_path(const 
TabletColumn& tablet_column,
             // Direct read extracted columns
             const auto* node = 
_sub_column_tree[unique_id].find_leaf(relative_path);
             ColumnIterator* it;
-            RETURN_IF_ERROR(node->data.reader->new_iterator(&it));
+            RETURN_IF_ERROR(node->data.reader->new_iterator(&it, 
&tablet_column));
             iter->reset(it);
         } else {
             // Node contains column with children columns or has corresponding sparse columns
@@ -887,7 +887,8 @@ Status Segment::new_column_iterator(const TabletColumn& 
tablet_column,
     }
     // init iterator by unique id
     ColumnIterator* it;
-    
RETURN_IF_ERROR(_column_readers.at(tablet_column.unique_id())->new_iterator(&it));
+    RETURN_IF_ERROR(
+            _column_readers.at(tablet_column.unique_id())->new_iterator(&it, 
&tablet_column));
     iter->reset(it);
 
     if (config::enable_column_type_check &&
@@ -906,7 +907,8 @@ Status Segment::new_column_iterator(int32_t unique_id, 
const StorageReadOptions*
                                     std::unique_ptr<ColumnIterator>* iter) {
     RETURN_IF_ERROR(_create_column_readers_once(opt->stats));
     ColumnIterator* it;
-    RETURN_IF_ERROR(_column_readers.at(unique_id)->new_iterator(&it));
+    TabletColumn tablet_column = _tablet_schema->column_by_uid(unique_id);
+    RETURN_IF_ERROR(_column_readers.at(unique_id)->new_iterator(&it, 
&tablet_column));
     iter->reset(it);
     return Status::OK();
 }
diff --git a/be/src/olap/rowset/segment_v2/segment.h 
b/be/src/olap/rowset/segment_v2/segment.h
index f4f465c9396..47e45ee81f8 100644
--- a/be/src/olap/rowset/segment_v2/segment.h
+++ b/be/src/olap/rowset/segment_v2/segment.h
@@ -98,6 +98,9 @@ public:
     Status new_iterator(SchemaSPtr schema, const StorageReadOptions& 
read_options,
                         std::unique_ptr<RowwiseIterator>* iter);
 
+    static Status new_default_iterator(const TabletColumn& tablet_column,
+                                       std::unique_ptr<ColumnIterator>* iter);
+
     uint32_t id() const { return _segment_id; }
 
     RowsetId rowset_id() const { return _rowset_id; }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnType.java 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnType.java
index 6227bea2b1b..58eeab49a23 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnType.java
@@ -28,6 +28,9 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 
 /**
  * This is a wrapper around the Column type. For most types the primitive type is sufficient; there are two exceptions that need the extra information here
@@ -184,7 +187,8 @@ public abstract class ColumnType {
         }
         if (srcTypeLen > dstTypeLen) {
             throw new DdlException(
-                String.format("Shorten type length is prohibited, srcType=%s, 
dstType=%s", src.toSql(), dst.toSql()));
+                    String.format("Shorten type length is prohibited, 
srcType=%s, dstType=%s", src.toSql(),
+                            dst.toSql()));
         }
     }
 
@@ -205,6 +209,24 @@ public abstract class ColumnType {
         }
     }
 
+    private static void validateStructFieldCompatibility(StructField 
originalField, StructField newField)
+            throws DdlException {
+        // check field name
+        if (!originalField.getName().equals(newField.getName())) {
+            throw new DdlException(
+                    "Cannot rename struct field from '" + 
originalField.getName() + "' to '" + newField.getName()
+                            + "'");
+        }
+
+        Type originalType = originalField.getType();
+        Type newType = newField.getType();
+
+        // deal with type change
+        if (!originalType.equals(newType)) {
+            checkSupportSchemaChangeForComplexType(originalType, newType, 
true);
+        }
+    }
+
     // This method defines the complex type which is struct, array, map if 
nested char-type
     // to support the schema-change behavior of length growth.
     public static void checkSupportSchemaChangeForComplexType(Type checkType, 
Type other, boolean nested)
@@ -212,12 +234,33 @@ public abstract class ColumnType {
         if (checkType.isStructType() && other.isStructType()) {
             StructType thisStructType = (StructType) checkType;
             StructType otherStructType = (StructType) other;
-            if (thisStructType.getFields().size() != 
otherStructType.getFields().size()) {
-                throw new DdlException("Cannot change struct type with 
different field size");
+
+            // currently only adding new fields to a struct type is supported
+            if (thisStructType.getFields().size() > 
otherStructType.getFields().size()) {
+                throw new DdlException("Cannot reduce struct fields from " + 
checkType.toSql() + " to "
+                        + other.toSql());
             }
-            for (int i = 0; i < thisStructType.getFields().size(); i++) {
-                
checkSupportSchemaChangeForComplexType(thisStructType.getFields().get(i).getType(),
-                        otherStructType.getFields().get(i).getType(), true);
+
+            Set<String> existingNames = new HashSet<>();
+            List<StructField> originalFields = thisStructType.getFields();
+            List<StructField> newFields = otherStructType.getFields();
+
+            // check each original field compatibility
+            for (int i = 0; i < originalFields.size(); i++) {
+                StructField originalField = originalFields.get(i);
+                StructField newField = newFields.get(i);
+
+                validateStructFieldCompatibility(originalField, newField);
+                existingNames.add(originalField.getName());
+            }
+
+            // check that new field names do not conflict with existing field names
+            for (int i = originalFields.size(); i < otherStructType.getFields().size(); i++) {
+                String newFieldName = otherStructType.getFields().get(i).getName();
+                if (existingNames.contains(newFieldName)) {
+                    throw new DdlException("Added struct field '" + 
newFieldName + "' conflicts with existing field");
+                }
             }
         } else if (checkType.isArrayType()) {
             if (!other.isArrayType()) {
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java
index a1dcff46471..73af753a69d 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/alter/SchemaChangeHandlerTest.java
@@ -54,7 +54,7 @@ public class SchemaChangeHandlerTest extends 
TestWithFeService {
         // create database db1
         createDatabase("test");
 
-        //create tables
+        // create tables
         String createAggTblStmtStr = "CREATE TABLE IF NOT EXISTS test.sc_agg 
(\n" + "user_id LARGEINT NOT NULL,\n"
                 + "date DATE NOT NULL,\n" + "city VARCHAR(20),\n" + "age 
SMALLINT,\n" + "sex TINYINT,\n"
                 + "last_visit_date DATETIME REPLACE DEFAULT '1970-01-01 
00:00:00',\n" + "cost BIGINT SUM DEFAULT '0',\n"
@@ -76,6 +76,30 @@ public class SchemaChangeHandlerTest extends 
TestWithFeService {
                 + "op_time DATETIME)\n" + "DUPLICATE  KEY(timestamp, type)\n" 
+ "DISTRIBUTED BY HASH(type) BUCKETS 1\n"
                 + "PROPERTIES ('replication_num' = '1', 'light_schema_change' 
= 'true');";
         createTable(createDupTblStmtStr);
+
+        String createAggTblStmtStrForStruct = "CREATE TABLE IF NOT EXISTS 
test.sc_agg_s (\n"
+                + "user_id LARGEINT NOT NULL,\n"
+                + "date DATE NOT NULL,\n" + "city VARCHAR(20),\n" + "age 
SMALLINT,\n" + "sex TINYINT,\n"
+                + "last_visit_date DATETIME REPLACE DEFAULT '1970-01-01 
00:00:00',\n" + "cost BIGINT SUM DEFAULT '0',\n"
+                + "max_dwell_time INT MAX DEFAULT '0',\n" + "min_dwell_time 
INT MIN DEFAULT '99999')\n"
+                + "AGGREGATE KEY(user_id, date, city, age, sex)\n" + 
"DISTRIBUTED BY HASH(user_id) BUCKETS 1\n"
+                + "PROPERTIES ('replication_num' = '1', 'light_schema_change' 
= 'true');";
+        createTable(createAggTblStmtStrForStruct);
+
+        String createUniqTblStmtStrForStruct = "CREATE TABLE IF NOT EXISTS 
test.sc_uniq_s (\n"
+                + "user_id LARGEINT NOT NULL,\n"
+                + "username VARCHAR(50) NOT NULL,\n" + "city VARCHAR(20),\n" + 
"age SMALLINT,\n" + "sex TINYINT,\n"
+                + "phone LARGEINT,\n" + "address VARCHAR(500),\n" + 
"register_time DATETIME)\n"
+                + "UNIQUE  KEY(user_id, username)\n" + "DISTRIBUTED BY 
HASH(user_id) BUCKETS 1\n"
+                + "PROPERTIES ('replication_num' = '1', 'light_schema_change' 
= 'true',\n"
+                + "'enable_unique_key_merge_on_write' = 'true');";
+        createTable(createUniqTblStmtStrForStruct);
+
+        String createDupTblStmtStrForStruct = "CREATE TABLE IF NOT EXISTS 
test.sc_dup_s (\n" + "timestamp DATETIME,\n"
+                + "type INT,\n" + "error_code INT,\n" + "error_msg 
VARCHAR(1024),\n" + "op_id BIGINT,\n"
+                + "op_time DATETIME)\n" + "DUPLICATE  KEY(timestamp, type)\n" 
+ "DISTRIBUTED BY HASH(type) BUCKETS 1\n"
+                + "PROPERTIES ('replication_num' = '1', 'light_schema_change' 
= 'true');";
+        createTable(createDupTblStmtStrForStruct);
     }
 
     private void waitAlterJobDone(Map<Long, AlterJobV2> alterJobs) throws 
Exception {
@@ -95,6 +119,205 @@ public class SchemaChangeHandlerTest extends 
TestWithFeService {
         }
     }
 
+    private void executeAlterAndVerify(String alterStmt, OlapTable tbl, String 
expectedStruct, int expectSchemaVersion,
+            String columnName) throws Exception {
+        AlterTableStmt stmt = (AlterTableStmt) parseAndAnalyzeStmt(alterStmt);
+        Env.getCurrentEnv().getAlterInstance().processAlterTable(stmt);
+        
waitAlterJobDone(Env.getCurrentEnv().getSchemaChangeHandler().getAlterJobsV2());
+        jobSize++;
+
+        tbl.readLock();
+        try {
+            Column column = tbl.getColumn(columnName);
+            
Assertions.assertTrue(column.getType().toSql().toLowerCase().contains(expectedStruct.toLowerCase()),
+                    "Actual struct: " + column.getType().toSql());
+            // then check schema version increase
+            MaterializedIndexMeta indexMeta = 
tbl.getIndexMetaByIndexId(tbl.getBaseIndexId());
+            int schemaVersion = indexMeta.getSchemaVersion();
+            LOG.info("schema version: {}", schemaVersion);
+            Assertions.assertEquals(expectSchemaVersion, schemaVersion);
+        } finally {
+            tbl.readUnlock();
+        }
+    }
+
+    private void expectException(String alterStmt, String expectedErrorMsg) {
+        try {
+            AlterTableStmt stmt = (AlterTableStmt) 
parseAndAnalyzeStmt(alterStmt);
+            Env.getCurrentEnv().getAlterInstance().processAlterTable(stmt);
+            
waitAlterJobDone(Env.getCurrentEnv().getSchemaChangeHandler().getAlterJobsV2());
+            Assertions.fail("Expected exception: " + expectedErrorMsg);
+        } catch (Exception e) {
+            System.out.println(e.getMessage());
+            Assertions.assertTrue(e.getMessage().contains(expectedErrorMsg),
+                    "Actual error: " + e.getMessage() + "\nExpected: " + 
expectedErrorMsg);
+        }
+    }
+
+    // In this test we should cover the following cases:
+    //  Positive Test Case
+    //    3.1 add sub-column
+    //    3.2 add sub-columns
+    //    3.3 add sub-column + lengthen sub-varchar-column
+    //  Negative Test Case
+    //    3.4 add sub-column + re-order struct-column
+    //    3.5 reduce sub-column
+    //    3.6 reduce sub-columns
+    //    3.7 add sub-column + shorten sub-varchar-column
+    //    3.8 change struct to other type
+    //    3.9 add sub-column + duplicate sub-column name
+    //    3.10 add sub-column + change origin sub-column name
+    //    3.11 add sub-column + change origin sub-column type
+    //    3.12 add sub-column with json/variant
+    // ------------------------- Positive Test Case -------------------------
+    private void testAddSingleSubColumn(OlapTable tbl, String tableName, 
String defaultValue) throws Exception {
+        String alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN 
c_s STRUCT<col:VARCHAR(10), col1:INT> "
+                + defaultValue;
+        executeAlterAndVerify(alterStmt, tbl, 
"STRUCT<col:varchar(10),col1:int>", 3, "c_s");
+    }
+
+    private void testAddNestedStructSubColumn(OlapTable tbl, String tableName, 
String defaultValue) throws Exception {
+        // origin c_s_s : struct<s1:struct<a:int>, 
s2:struct<a:array<struct<a:int>>>>
+        // case1. add s1 sub-column : struct<s1:struct<a:int, b:double>, 
s2:struct<a:array<struct<a:int>>>>
+        // case2. add s2 sub-column : struct<s1:struct<a:int,b:double>, 
s2:struct<a:array<struct<a:int>>, b:double>>
+        // case3. add s2.a sub-column : 
struct<s1:struct<a:int,b:double>,s2:struct<a:array<struct<a:int,b:double>>,b:double>>
+        // case4. add multiple sub-columns : 
struct<s1:struct<a:int,b:double,c:varchar(10)>,s2:struct<a:array<struct<a:int,b:double,c:varchar(10)>>,b:double,c:varchar(10)>,c:varchar(10)>
+        String alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN 
c_s_s "
+                + 
"struct<s1:struct<a:int,b:double>,s2:struct<a:array<struct<a:int>>>> " + 
defaultValue;
+        executeAlterAndVerify(alterStmt, tbl,
+                
"struct<s1:struct<a:int,b:double>,s2:struct<a:array<struct<a:int>>>>", 4, 
"c_s_s");
+        alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN c_s_s "
+                + "struct<s1:struct<a:int,b:double>, 
s2:struct<a:array<struct<a:int>>,b:double>> " + defaultValue;
+        executeAlterAndVerify(alterStmt, tbl,
+                
"struct<s1:struct<a:int,b:double>,s2:struct<a:array<struct<a:int>>,b:double>>", 
5, "c_s_s");
+        alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN c_s_s "
+                + "struct<s1:struct<a:int,b:double>, 
s2:struct<a:array<struct<a:int, b:double>>,b:double>> "
+                + defaultValue;
+        executeAlterAndVerify(alterStmt, tbl,
+                
"struct<s1:struct<a:int,b:double>,s2:struct<a:array<struct<a:int,b:double>>,b:double>>",
 6, "c_s_s");
+        alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN c_s_s "
+                + 
"struct<s1:struct<a:int,b:double,c:varchar(10)>,s2:struct<a:array<struct<a:int,b:double,c:varchar(10)>>,b:double,c:varchar(10)>,c:varchar(10)>
 "
+                + defaultValue;
+        executeAlterAndVerify(alterStmt, tbl,
+                
"struct<s1:struct<a:int,b:double,c:varchar(10)>,s2:struct<a:array<struct<a:int,b:double,c:varchar(10)>>,b:double,c:varchar(10)>,c:varchar(10)>",
+                7, "c_s_s");
+    }
+
+    private void testAddMultipleSubColumns(OlapTable tbl, String tableName, 
String defaultValue) throws Exception {
+        String alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN 
c_s STRUCT<col:VARCHAR(10), "
+                + "col1:INT, col2:DECIMAL(10,2), col3:DATETIME> " + 
defaultValue;
+        executeAlterAndVerify(alterStmt, tbl,
+                
"struct<col:varchar(10),col1:int,col2:decimalv3(10,2),col3:datetimev2(0)>", 8, 
"c_s");
+    }
+
+    private void testLengthenVarcharSubColumn(OlapTable tbl, String tableName, 
String defaultValue) throws Exception {
+        String alterStmt = "ALTER TABLE test." + tableName
+                + " MODIFY COLUMN c_s 
STRUCT<col:VARCHAR(30),col1:int,col2:decimal(10,2),col3:datetime,col4:string> "
+                + defaultValue;
+        executeAlterAndVerify(alterStmt, tbl,
+                
"struct<col:varchar(30),col1:int,col2:decimalv3(10,2),col3:datetimev2(0),col4:text>",
 9, "c_s");
+    }
+
+    // ------------------------- Negative Test Case -------------------------
+    private void testReduceSubColumns(String defaultValue, String tableName) {
+        String alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN 
c_s STRUCT<col:VARCHAR(10)> "
+                + defaultValue;
+        expectException(alterStmt, "Cannot reduce struct fields");
+    }
+
+    private void testShortenVarcharSubColumn(String defaultValue, String 
tableName) {
+        String alterStmt = "ALTER TABLE test." + tableName
+                + " MODIFY COLUMN c_s 
struct<col:varchar(10),col1:int,col2:decimalv3(10,2),col3:datetimev2(0),col4:string>
 "
+                + defaultValue;
+        expectException(alterStmt, "Shorten type length is prohibited");
+    }
+
+    private void testChangeStructToOtherType(String defaultValue, String 
tableName) {
+        String alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN 
c_s VARCHAR(100) " + defaultValue;
+        expectException(alterStmt, "Can not change");
+    }
+
+    private void testDuplicateSubColumnName(String defaultValue, String 
tableName) {
+        String alterStmt = "ALTER TABLE test." + tableName + " MODIFY COLUMN 
c_s STRUCT<col:VARCHAR(10), col:INT> "
+                + defaultValue;
+        expectException(alterStmt, "Duplicate field name");
+    }
+
+    private void testChangeExistingSubColumnName(String defaultValue, String 
tableName) {
+        String alterStmt = "ALTER TABLE test." + tableName
+                + " MODIFY COLUMN c_s struct<col6:varchar(30),"
+                + "col1:int,col2:decimalv3(10,2),col3:datetimev2(0),col4:text> 
"
+                + defaultValue;
+        expectException(alterStmt, "Cannot rename");
+    }
+
+    private void testChangeExistingSubColumnType(String defaultValue, String 
tableName) {
+        String alterStmt = "ALTER TABLE test." + tableName
+                + " MODIFY COLUMN c_s 
struct<col:varchar(30),col1:varchar(10),col2:decimalv3(10,2),col3:datetimev2(0),col4:text>
 "
+                + defaultValue;
+        expectException(alterStmt, "Cannot change");
+    }
+
+    private void testAddUnsupportedSubColumnType(String defaultValue, String 
tableName) {
+        String alterStmtJson = "ALTER TABLE test." + tableName
+                + " MODIFY COLUMN c_s 
struct<col:varchar(30),col1:varchar(10),col2:decimalv3(10,2),col3:datetimev2(0),col4:text,col5:json>
 "
+                + defaultValue;
+        expectException(alterStmtJson, "STRUCT unsupported sub-type");
+        String alterStmtVariant = "ALTER TABLE test." + tableName
+                + " MODIFY COLUMN c_s 
struct<col:varchar(30),col1:varchar(10),col2:decimalv3(10,2),col3:datetimev2(0),col4:text,col5:variant>
 "
+                + defaultValue;
+        expectException(alterStmtVariant, "STRUCT unsupported sub-type");
+    }
+
+    @Test
+    public void testModifyStructColumn() throws Exception {
+        // loop for all tables to add struct column
+        String[] tableNames = {"sc_agg_s", "sc_uniq_s", "sc_dup_s"};
+        String[] defaultValues = {"REPLACE_IF_NOT_NULL", "NULL", "NULL"};
+        for (int i = 0; i < tableNames.length; i++) {
+            String tableName = tableNames[i];
+            String defaultVal = defaultValues[i];
+            Database db = 
Env.getCurrentInternalCatalog().getDbOrMetaException("test");
+            OlapTable tbl = (OlapTable) db.getTableOrMetaException(tableName, 
Table.TableType.OLAP);
+            // add struct column
+            String addValColStmtStr = "alter table test." + tableName + " add 
column c_s struct<col:varchar(10)> "
+                    + defaultVal;
+            AlterTableStmt addValColStmt = (AlterTableStmt) 
parseAndAnalyzeStmt(addValColStmtStr);
+            
Env.getCurrentEnv().getAlterInstance().processAlterTable(addValColStmt);
+            // check alter job, do not create job
+            Map<Long, AlterJobV2> alterJobs = 
Env.getCurrentEnv().getSchemaChangeHandler().getAlterJobsV2();
+            jobSize++;
+            waitAlterJobDone(alterJobs);
+            // add struct column
+            // nested structs also support adding sub-columns
+            addValColStmtStr = "alter table test." + tableName
+                    + " add column c_s_s struct<s1:struct<a:int>, 
s2:struct<a:array<struct<a:int>>>> "
+                    + defaultVal;
+            addValColStmt = (AlterTableStmt) 
parseAndAnalyzeStmt(addValColStmtStr);
+            
Env.getCurrentEnv().getAlterInstance().processAlterTable(addValColStmt);
+            // check alter job, do not create job
+            alterJobs = 
Env.getCurrentEnv().getSchemaChangeHandler().getAlterJobsV2();
+            jobSize++;
+            waitAlterJobDone(alterJobs);
+
+
+            // positive test
+            testAddSingleSubColumn(tbl, tableName, defaultVal);
+            testAddNestedStructSubColumn(tbl, tableName, defaultVal);
+            testAddMultipleSubColumns(tbl, tableName, defaultVal);
+            testLengthenVarcharSubColumn(tbl, tableName, defaultVal);
+
+            // negative test
+            testReduceSubColumns(defaultVal, tableName);
+            testShortenVarcharSubColumn(defaultVal, tableName);
+            testChangeStructToOtherType(defaultVal, tableName);
+            testDuplicateSubColumnName(defaultVal, tableName);
+            testChangeExistingSubColumnName(defaultVal, tableName);
+            testChangeExistingSubColumnType(defaultVal, tableName);
+            testAddUnsupportedSubColumnType(defaultVal, tableName);
+        }
+    }
+
     @Test
     public void testAggAddOrDropColumn() throws Exception {
         LOG.info("dbName: {}", Env.getCurrentInternalCatalog().getDbNames());
diff --git a/regression-test/data/schema_change_p0/test_modify_struct.out 
b/regression-test/data/schema_change_p0/test_modify_struct.out
new file mode 100644
index 00000000000..a5560a4f9a5
Binary files /dev/null and 
b/regression-test/data/schema_change_p0/test_modify_struct.out differ
diff --git a/regression-test/suites/schema_change_p0/test_modify_struct.groovy 
b/regression-test/suites/schema_change_p0/test_modify_struct.groovy
new file mode 100644
index 00000000000..1f865f919cd
--- /dev/null
+++ b/regression-test/suites/schema_change_p0/test_modify_struct.groovy
@@ -0,0 +1,426 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+import java.util.concurrent.TimeUnit
+import org.awaitility.Awaitility
+
+suite ("test_modify_struct") {
+    def waitUntilSchemaChangeDone = { tableName, insert_sql, canceled=false ->
+        if (canceled) {
+            Awaitility.await().atMost(300, 
TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).await().until(() 
-> {
+                def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE 
TableName='${tableName}' ORDER BY createtime DESC LIMIT 1  """
+                if (jobStateResult[0][9].toString().toUpperCase() == 
"CANCELLED") {
+                    return true
+                }
+                return false
+            })
+        } else {
+            waitForSchemaChangeDone({
+                sql " SHOW ALTER TABLE COLUMN WHERE TableName='${tableName}' 
ORDER BY createtime DESC LIMIT 1 "
+                time 600
+            }, insert_sql)
+        }
+    }
+
+    def tableNamePrefix = "test_struct_add_sub_column"
+    def tableName = tableNamePrefix
+    List<String> tableNames = new ArrayList<String>()
+    List<String> mv_query_sql = new ArrayList<String>()
+    List<String> mvNames = new ArrayList<String>()
+
+    try {
+
+        // 1. create table with duplicate key, agg key, unique key
+        def suffixTypes = ["duplicate", "unique", "aggregate"]
+        def defaultValues = ["NULL", "NULL", "REPLACE_IF_NOT_NULL"]
+        for (int j = 0; j < suffixTypes.size(); j++) {
+            String suffix = suffixTypes[j]
+            String defaultValue = defaultValues[j]
+            String notNullValue = j == 2 ? defaultValue : "NOT NULL"
+            String c1DefaultValue = j == 2 ? defaultValue : "DEFAULT '10.5'"
+
+            tableName = tableNamePrefix + "_" + suffix
+            tableNames.add(tableName)
+            sql "DROP TABLE IF EXISTS ${tableName} FORCE;"
+            sql """
+                CREATE TABLE IF NOT EXISTS `${tableName}`
+                (
+                    `c0` LARGEINT NOT NULL,
+                    `c1` DECIMAL(10,2) ${c1DefaultValue},
+                    `c_s_not_null`  STRUCT<col:VARCHAR(10)> ${notNullValue},
+                    `c_s` STRUCT<col:VARCHAR(10)> ${defaultValue},
+                )
+                ${suffix.toUpperCase()} KEY(`c0`)
+                DISTRIBUTED BY HASH(c0) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "disable_auto_compaction" = "true"
+                );
+            """
+            // try to create an MV from the original table
+            // CREATE MATERIALIZED VIEW on complex types is forbidden, as defined in MaterializedViewHandler::checkAndPrepareMaterializedView
+            mvNames.add("${tableName}_mv")
+            def query_sql = "SELECT c1, c_s FROM ${tableName}"
+            mv_query_sql.add(query_sql)
+            def mvNotSupportedMsg = "errCode = 2"
+            expectExceptionLike({
+                sql """ CREATE MATERIALIZED VIEW ${tableName}_mv AS 
${query_sql} """
+            }, mvNotSupportedMsg)
+
+            // 2. insert some data before modify struct column
+            sql """ insert into $tableName values
+                    (0, 13.7, named_struct('col','commiter'), 
named_struct('col','commiter'));
+                """
+            sql """ insert into $tableName values
+                    (1, 14.9, named_struct('col','commiter'), 
named_struct('col','amory'));
+                """
+            // more than 10 characters
+            test {
+                sql """ insert into $tableName values
+                    (11, 111.111, named_struct('col','commiter'), 
named_struct('col','amoryIsBetter'));
+                """
+                exception "Insert has filtered data in strict mode"
+            }
+
+            order_qt_sc_before """ select * from ${tableName} order by c0; """
+
+            // 3. modify struct column
+                //  Positive Test Case
+                //    3.1 add sub-column
+                //    3.2 add sub-columns
+                //    3.3 add sub-column + lengthen sub-varchar-column
+                //
+                //  Negative Test Case
+                //    3.4 add sub-column + re-order struct-column
+                //    3.5 reduce sub-column
+                //    3.6 reduce sub-columns
+                //    3.7 add sub-column + shorten sub-varchar-column
+                //    3.8 change struct to other type
+                //    3.9 add sub-column + duplicate sub-column name
+                //    3.10 add sub-column + change origin sub-column name
+                //    3.11 add sub-column + change origin sub-column type
+                //    3.12 add sub-column with json/variant
+            ////////////////////////////////==================   Positive Test 
Case =================//////////////////
+            // 3.1 add sub-column
+            def sub_columns = ["col1:INT", "col2:DECIMAL(10, 2)", 
"col3:DATETIME", "col4:ARRAY<STRING>", "col5:MAP<INT, STRING>", 
"col6:STRUCT<a:INT, b:STRING>"]
+            def sub_column_values = ["'col1', 1", "'col2', 1.1", "'col3', 
'2021-01-01 00:00:01'", "'col4', ['a', 'b']", "'col5', {1:'a', 2:'b'}", 
"'col6', {1, 'a'}"]
+            String sub_col = "Struct<col: VARCHAR(10)>"
+            for (int i = 0; i < sub_columns.size(); i++) {
+                // strip the trailing '>' from sub_col and append the new sub-column
+                sub_col = sub_col[0..<sub_col.length()-1] + ", " + 
sub_columns[i] + ">"
+                sql """ alter table ${tableName} modify column c_s ${sub_col} 
${defaultValue}"""
+                String add_columns = ""
+                for (int k = 0 ; k <= i; k++) {
+                    add_columns += ", " + sub_column_values[k]
+                }
+                def insert_sql = "insert into ${tableName} values (" + 
(i+2).toString() + ", 21.12, named_struct('col','commiter'), 
named_struct('col','amory2'${add_columns}))"
+                logger.info(insert_sql)
+                waitUntilSchemaChangeDone.call(tableName, insert_sql)
+                // check result
+                qt_sql_after """ select * from ${tableName} order by c0; """
+            }
+
+            // 3.2 add sub-columns
+            def all_sub_column = "col11:INT, col12:DECIMAL(10, 2), 
col13:DATETIME, col14:ARRAY<STRING>, col15:MAP<INT, STRING>, 
col16:STRUCT<a:INT, b:STRING>"
+            sql """ alter table ${tableName} modify column c_s 
STRUCT<col:VARCHAR(10), col1:INT, col2:DECIMAL(10, 2), col3:DATETIME, 
col4:ARRAY<STRING>, col5:MAP<INT, STRING>, col6:STRUCT<a:INT, b:STRING>, 
${all_sub_column}> ${defaultValue}"""
+            def insert_sql1 = "insert into ${tableName} values (8, 31.13, 
named_struct('col','commiter'), named_struct('col','amory3','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}))"
+            waitUntilSchemaChangeDone.call(tableName, insert_sql1)
+            // check result
+            qt_sql_after1 """ select * from ${tableName} order by c0; """
+
+            // 3.3 add sub-column + lengthen sub-varchar-column
+            sql """ alter table ${tableName} add column c_s_1 
STRUCT<col:VARCHAR(20)> ${defaultValue}"""
+            def insert_sql11 = "insert into ${tableName} values (9, 41.14, 
named_struct('col','commiter'), named_struct('col','amory4','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 
named_struct('col','amoryIsBetter'))"
+            waitUntilSchemaChangeDone.call(tableName, insert_sql11)
+            // check the newly created struct column
+            qt_sql_after2 """ select c_s_1 from ${tableName} where c0 = 9; """
+            // add sub-column + lengthen sub-varchar-column
+            sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT> ${defaultValue}"""
+            def insert_sql12 = "insert into ${tableName} values (10, 51.15, 
named_struct('col','commiter'), named_struct('col','amory5','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 
named_struct('col','amoryIsMoreMoreThan30Better', 'col1', 1))"
+            waitUntilSchemaChangeDone.call(tableName, insert_sql12)
+            // check the newly created struct column
+            qt_sql_after3 """ select c_s_1 from ${tableName} where c0 = 10; """
+
+            //////////////////==================   Negative Test Case 
=================//////////////////
+            // 3.4 add sub-column + re-order struct-column
+            // add a scalar column
+            sql """ alter table ${tableName} add column str_col STRING 
${defaultValue}"""
+            // insert data
+            sql """ insert into ${tableName} values (11, 61.16, 
named_struct('col','commiter'), named_struct('col','amory6','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}),  
named_struct('col','amoryIsBetter', 'col1', 1), 'amory6')"""
+            // check the newly created string column
+            qt_sql_after4 """ select * from ${tableName} where c0 = 11; """
+            // add sub-column + re-order struct column: this schema-change task will fail
+            sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT, col2:decimal(10,2)> ${defaultValue} after 
str_col """
+            def insert_sql13 = "insert into ${tableName} values (12, 71.17, 
named_struct('col','commiter'), named_struct('col','amory7','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory7', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 12.2))"
+            waitUntilSchemaChangeDone.call(tableName, insert_sql13, true)
+            // check data
+            qt_sql_after5 """ select * from ${tableName} where c0 = 12; """
+            // first add the sub-column, then re-order the struct column
+            sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT, col2:decimal(10,2)> ${defaultValue} """
+            waitUntilSchemaChangeDone.call(tableName, "")
+            sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT, col2:decimal(10,2)> ${defaultValue} after 
str_col """
+            waitUntilSchemaChangeDone.call(tableName, insert_sql13)
+            // check data
+            qt_sql_after51 """ select * from ${tableName} where c0 = 12; """
+
+            // desc for c_s_1
+            String[][] res = sql """ desc ${tableName} """
+            logger.info(res[5][1])
+            
assertEquals(res[5][1].toLowerCase(),"struct<col:varchar(30),col1:int,col2:decimal(10,2)>")
+
+            // 3.5 reduce sub-column
+            def reduceErrMsg="errCode = 2, detailMessage = Cannot reduce 
struct fields from"
+            expectExceptionLike({
+                sql """ alter  table ${tableName} MODIFY  column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT>  ${defaultValue} """
+                waitUntilSchemaChangeDone.call(tableName, "")
+            },reduceErrMsg)
+            // 3.6 reduce sub-columns
+            expectExceptionLike({
+                sql """ alter  table ${tableName} MODIFY  column c_s_1 
STRUCT<col:VARCHAR(30)> ${defaultValue} """
+                waitUntilSchemaChangeDone.call(tableName, "")
+            },reduceErrMsg)
+            // 3.7 add sub-column + shorten sub-varchar-column
+            def shortenErrMsg="errCode = 2, detailMessage = Shorten type 
length is prohibited"
+            expectExceptionLike({
+                sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(10), col1:INT, col2:DECIMAL(10,2), col3:DATETIME> 
${defaultValue} """
+                waitUntilSchemaChangeDone.call(tableName, "")
+            },shortenErrMsg)
+            // 3.8 change struct to other type
+            def changeErrMsg="errCode = 2, detailMessage = Can not change"
+            for (String type : ["STRING", "INT", "DECIMAL(10, 2)", "DATETIME", 
"ARRAY<STRING>", "MAP<INT, STRING>"]) {
+                expectExceptionLike({
+                    sql """ alter table ${tableName} modify column c_s_1 
${type} ${defaultValue} """
+                    waitUntilSchemaChangeDone.call(tableName, "")
+                },changeErrMsg)
+            }
+            // 3.9 add sub-column + duplicate sub-column name; DataType::validateCatalogDataType will throw an exception
+            def duplicateErrMsg="errCode = 2, detailMessage = Duplicate field 
name"
+            expectExceptionLike({
+                sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT, col2:DECIMAL(10,2), col1:INT> ${defaultValue} 
"""
+                waitUntilSchemaChangeDone.call(tableName, "")
+            },duplicateErrMsg)
+            // 3.10 add sub-column + change origin sub-column name
+            def changeNameErrMsg="errCode = 2, detailMessage = Cannot rename"
+            expectExceptionLike({
+                sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col4:VARCHAR(30), col1:INT, col2:DECIMAL(10,2), col3:INT> 
${defaultValue} """
+                waitUntilSchemaChangeDone.call(tableName, "")
+            },changeNameErrMsg)
+            // 3.11 add sub-column + change origin sub-column type
+            def changeTypeErrMsg="errCode = 2, detailMessage = Cannot change"
+            expectExceptionLike({
+                sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:STRING, col2:DECIMAL(10,2), col3:VARCHAR(10)> 
${defaultValue} """
+                waitUntilSchemaChangeDone.call(tableName, "")
+            },changeTypeErrMsg)
+            // 3.12 add sub-column with json/variant; DataType::validateNestedType will throw an exception
+            def jsonVariantErrMsg="errCode = 2, detailMessage = STRUCT 
unsupported sub-type"
+            for (String type : ["JSON", "VARIANT"]) {
+                expectExceptionLike({
+                    sql """ alter table ${tableName} modify column c_s_1 
STRUCT<col:VARCHAR(30), col1:INT, col2:DECIMAL(10,2), col3:${type}> 
${defaultValue} """
+                    waitUntilSchemaChangeDone.call(tableName, "")
+                },jsonVariantErrMsg)
+            }
+
+            // add column with some array cases
+            // 1. add array<struct<a1:int>> then modify to 
array<struct<a1:int, a2:int>>
+            // 2. add struct<a1:array<struct<a1:int>>> then modify to 
struct<a1:array<struct<a1:int, a2:int>>>
+            // add column with some struct cases
+            // 3. add struct<a1:int, a2:struct<a1:int>> then modify 
struct<a1:int, a2: struct<a1:int, a2:string, a3:int>>
+            // 4. add struct<a1:struct<a1:int>, a2:struct<a1:int>> then modify 
struct<a1:struct<a1:int, a2:int>, a2: struct<a1:int,a2:string>>
+            def insert_sql14 = "insert into ${tableName} values (14, 81.18, 
named_struct('col','commiter'), named_struct('col','amory8','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory8', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 12.2), 
array(named_struct('a1', 1)))"
+            sql """ alter table ${tableName} add column c_a 
ARRAY<STRUCT<a1:INT>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql14)
+            qt_sql_after6 """ select * from ${tableName} where c0 = 14; """
+
+            def insert_sql15 = "insert into ${tableName} values (15, 91.19, 
named_struct('col','commiter'), named_struct('col','amory9','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory9', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 1, 'a2', 2)))"
+            sql """ alter table ${tableName} modify column c_a 
ARRAY<STRUCT<a1:INT, a2:INT>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql15)
+            qt_sql_after7 """ select * from ${tableName} where c0 = 15; """
+
+            def insert_sql16 = "insert into ${tableName} values (16, 100.01, 
named_struct('col','commiter'), named_struct('col','amory10','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory10', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 3, 'a2', 4)), name [...]
+            sql """ alter table ${tableName} add column c_s_a 
STRUCT<a1:ARRAY<STRUCT<a1:INT>>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql16)
+            qt_sql_after8 """ select * from ${tableName} where c0 = 16; """
+
+            def insert_sql17 = "insert into ${tableName} values (17, 110.11, 
named_struct('col','commiter'), named_struct('col','amory11','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory11', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 3, 'a2', 4)), name [...]
+            sql """ alter table ${tableName} modify column c_s_a 
STRUCT<a1:ARRAY<STRUCT<a1:INT, a2:INT>>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql17)
+            qt_sql_after9 """ select * from ${tableName} where c0 = 17; """
+
+            def insert_sql18 = "insert into ${tableName} values (18, 120.21, 
named_struct('col','commiter'), named_struct('col','amory12','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory12', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 5, 'a2', 6)), name [...]
+            sql """ alter table ${tableName} ADD COLUMN c_s_s STRUCT<a1:INT, 
a2:STRUCT<a1:INT>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql18)
+            qt_sql_after10 """ select * from ${tableName} where c0 = 18; """
+
+            def insert_sql19 = "insert into ${tableName} values (19, 130.31, 
named_struct('col','commiter'), named_struct('col','amory13','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory13', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 5, 'a2', 6)), name [...]
+            sql """ alter table ${tableName} modify column c_s_s 
struct<a1:int, a2: struct<a1:int, a2:string, a3:int>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql19)
+            qt_sql_after11 """ select * from ${tableName} where c0 = 19; """
+
+            def insert_sql20 = "insert into ${tableName} values (20, 140.41, 
named_struct('col','commiter'), named_struct('col','amory14','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory14', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 7, 'a2', 8)), name [...]
+            sql """ alter table ${tableName} add column c_s_2 
STRUCT<a1:STRUCT<a1:INT>, a2:STRUCT<a1:INT>> ${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql20)
+            qt_sql_after12 """ select * from ${tableName} where c0 = 20; """
+
+            def insert_sql21 = "insert into ${tableName} values (21, 150.51, 
named_struct('col','commiter'), named_struct('col','amory15','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory15', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 1), 
array(named_struct('a1', 7, 'a2', 8)), name [...]
+            sql """ alter table ${tableName} modify column c_s_2 
STRUCT<a1:STRUCT<a1:INT, a2:INT>, a2: STRUCT<a1:INT,a2:STRING>> 
${defaultValue}"""
+            waitUntilSchemaChangeDone.call(tableName, insert_sql21)
+            qt_sql_after13 """ select * from ${tableName} where c0 = 21; """
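+            // note: a single MODIFY may extend several nested structs at once; here both
+            // a1 and a2 of c_s_2 gain new sub-columns, which pre-existing rows are
+            // expected to surface as NULL.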
+
+        }
+
+        // test changing a struct column from nullable to not nullable (expected to be rejected)
+        // desc for c_s_1
+        for (int idx = 0; idx < tableNames.size() - 1; idx++) {
+            String table_name = tableNames[idx]
+            String[][] descRes = sql """ desc ${table_name} """
+            logger.info(descRes[5][2])
+            assertEquals(descRes[5][2].toString().toLowerCase(), "yes")
+            def insert_sql_22 = "insert into ${table_name} values (22, 160.61, 
named_struct('col','commiter'), named_struct('col','amory16','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory16', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 12.21), 
array(named_struct('a1', 7, 'a2', 8)) [...]
+            def changeNull2NotNull = "errCode = 2, detailMessage = Can not change from nullable to non-nullable"
+            expectExceptionLike({
+                sql """ alter table ${table_name} modify column c_s_1 struct<col:varchar(30),col1:int,col2:decimal(10,2)> NOT NULL """
+                waitUntilSchemaChangeDone.call(table_name, insert_sql_22)
+            }, changeNull2NotNull)
+            waitUntilSchemaChangeDone.call(table_name, insert_sql_22)
+        }
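+        // in short: nullable -> non-nullable is rejected for struct columns (see the
+        // "Can not change from nullable to non-nullable" error asserted above),
+        // presumably because it would require re-validating every existing row.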
+
+        // but changing from not nullable to nullable is supported
+        for (int idx = 0; idx < tableNames.size()-1; idx++) {
+            String table_name = tableNames[idx]
+            String[][] descRes = sql """ desc ${table_name} """
+            logger.info(descRes[2][2])
+            assertEquals(descRes[2][2].toString().toLowerCase(), "no")
+            // schema change does not support adding a new struct column declared NOT NULL.
+            def changeNotNullErr = "Struct type column default value just support null"
+            expectExceptionLike({
+                sql """ alter table ${table_name} add column c_s_not_null1 struct<col:varchar(30),col1:int,col2:decimal(10,2)> NOT NULL default '{}'"""
+                waitUntilSchemaChangeDone.call(table_name, "")
+            }, changeNotNullErr)
+
+            // alter the original table's struct column c_s_not_null from NOT NULL to NULL
+            // then insert a row that carries null data
+            def insert_sql_23  = "insert into ${table_name} values (23, 
160.61, null, named_struct('col','amory16','col1', 1, 'col2', 1.1, 'col3', 
'2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 'col6', {1, 
'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 'col14', ['a', 
'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory16', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 12.21), 
array(named_struct('a1', 7, 'a2', 8)), named_struct('a1', arra [...]
+            sql """ alter table ${table_name} modify column c_s_not_null 
STRUCT<col:VARCHAR(10)> NULL"""
+            waitUntilSchemaChangeDone.call(table_name, insert_sql_23)
+            qt_sql_after14 """ select * from ${table_name} where c0 = 23; """
+            descRes = sql """ desc ${table_name} """
+            logger.info(descRes[2][2])
+            assertEquals(descRes[2][2].toString().toLowerCase(), "yes")
+            // insert some data
+            sql "insert into ${table_name} values (24, 160.61, 
named_struct('col','amory'), named_struct('col','amory16','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory16', 
named_struct('col','amoryIsBetter', 'col1', 1, 'col2', 12.21), 
array(named_struct('a1', 7, 'a2', 8)), named_struct('a1' [...]
+            qt_sql_after15 """ select * from ${table_name} where c0 = 24; """
+        }
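+        // in short: relaxing not nullable -> nullable is allowed; DESC reports "yes"
+        // afterwards, and both a null value (row 23) and a non-null struct (row 24)
+        // can be inserted once the change completes.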
+
+        // test that the aggregate table does not allow changing a column's aggregation type
+        // desc for c_s_1
+        String[][] res = sql """ desc ${tableNames[2]} """
+        logger.info(res[5][1])
+        assertEquals(res[5][1].toLowerCase(), "struct<col:varchar(30),col1:int,col2:decimal(10,2)>")
+
+        test {
+            sql """ alter table ${tableNames[2]} modify column c_s_1 struct<col:varchar(30),col1:int,col2:decimal(10,2)> REPLACE """
+            exception "Can not change aggregation type"
+        }
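+        // note: a MODIFY COLUMN that would alter the aggregation type of c_s_1 is expected
+        // to fail with "Can not change aggregation type"; struct sub-column additions
+        // should keep the original aggregation type.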
+
+
+        /////////////// compaction behavior ///////////////
+        for (int idx = 0; idx < tableNames.size(); idx++) {
+            String table_name = tableNames[idx]
+            String[][] descRes = sql """ desc ${table_name} """
+            logger.info(descRes[0][1])
+            assertEquals(descRes[0][1].toLowerCase(),"largeint")
+            logger.info(descRes[3][1])
+            assertEquals(descRes[3][1].toLowerCase(), "struct<col:varchar(10),col1:int,col2:decimal(10,2),col3:datetime,col4:array<text>,col5:map<int,text>,col6:struct<a:int,b:text>,col11:int,col12:decimal(10,2),col13:datetime,col14:array<text>,col15:map<int,text>,col16:struct<a:int,b:text>>")
+            logger.info(descRes[4][1])
+            assertEquals(descRes[4][1].toLowerCase(),"text")
+            logger.info(descRes[5][1])
+            assertEquals(descRes[5][1].toLowerCase(), "struct<col:varchar(30),col1:int,col2:decimal(10,2)>")
+            // 1. insert more data
+            sql """ insert into ${table_name} values (25, 81.18, 
named_struct('col','amory'), named_struct('col','amory8','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory8', 
named_struct('col','amoryMoreMore30Better', 'col1', 1, 'col2', 1.1), 
array(named_struct('a1', 7, 'a2', 8)), named_struc [...]
+            sql """ insert into ${table_name} values (26, 91.19, 
named_struct('col','amory'), named_struct('col','amory9','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory9', 
named_struct('col','amoryMoreMore30Better', 'col1', 1, 'col2', 1.1), 
array(named_struct('a1', 7, 'a2', 8)), named_struc [...]
+            sql """ insert into ${table_name} values (27, 10.01, 
named_struct('col','amory'),named_struct('col','amory10','col1', 1, 'col2', 
1.1, 'col3', '2021-01-01 00:00:00', 'col4', ['a', 'b'], 'col5', {1:'a', 2:'b'}, 
'col6', {1, 'a'}, 'col11', 2, 'col12', 2.1, 'col13', '2021-01-01 00:00:00', 
'col14', ['a', 'b'], 'col15', {1:'a', 2:'b'}, 'col16', {1, 'a'}), 'amory10', 
named_struct('col','amoryMoreMore30Better', 'col1', 1, 'col2', 1.1), 
array(named_struct('a1', 7, 'a2', 8)), named_stru [...]
+            sql """ insert into ${table_name} 
+                                                    values 
+                                                      (
+                                                        28, 
+                                                        11.11, 
+                                                        
named_struct('col','amory'),
+                                                        named_struct(
+                                                          'col', 'amory11', 
'col1', 1, 'col2', 
+                                                          1.1, 'col3', 
'2021-01-01 00:00:00', 
+                                                          'col4', [ 'a', 'b' 
], 'col5', {1 : 'a', 
+                                                          2 : 'b' }, 'col6', 
{1, 'a' }, 'col11', 
+                                                          2, 'col12', 2.1, 
'col13', '2021-01-01 00:00:00', 
+                                                          'col14', [ 'a', 'b' 
], 'col15', {1 : 'a', 
+                                                          2 : 'b' }, 'col16', 
{1, 'a' }
+                                                        ), 
+                                                        'amory11', 
+                                                        named_struct(
+                                                          'col', 
'amoryMoreMore30Better', 'col1', 1,
+                                                          'col2', 1.1
+                                                        ), 
array(named_struct('a1', 7, 'a2', 8)), named_struct('a1', 
array(named_struct('a1', 5, 'a2', 6))), named_struct('a1', 5, 'a2', 
named_struct('a1', 5, 'a2', 'amory', 'a3', 6)), named_struct('a1', 
named_struct('a1', 7, 'a2', 8), 'a2', named_struct('a1', 8, 'a2', 'amory'))
+                                                      ), 
+                                                      (29, 12.21, null, null, 
null, null, null, null, null, null), 
+                                                      (30, 13.31, 
named_struct('col','amory'), named_struct(
+                                                          'col', null, 'col1', 
null, 'col2', 
+                                                           null, 'col3', null, 
+                                                          'col4', null, 
'col5', null, 'col6', null, 'col11', 
+                                                          null, 'col12', null, 
'col13', null, 
+                                                          'col14', null, 
'col15', null, 'col16', null
+                                                      ), 
+                                                      "", 
+                                                      named_struct(
+                                                          'col', null, 'col1', 
null, 'col2', 
+                                                          null
+                                                      ),  
array(named_struct('a1', null, 'a2', 8)), named_struct('a1', 
array(named_struct('a1', null, 'a2', 6))), named_struct('a1', 5, 'a2', 
named_struct('a1', 5, 'a2', 'amory', 'a3', 6)), named_struct('a1', 
named_struct('a1', 7, 'a2', 8), 'a2', named_struct('a1', 8, 'a2', 'amory'))), 
+                                                      (31, 14.41, null, null, 
"amory14",
+                                                      named_struct(
+                                                          'col', 
'amoryMoreMore30Better', 'col1', null, 'col2',
+                                                          null
+                                                      ), 
array(named_struct('a1', 9, 'a2', 10)), null, null, null) """
+            // 2. check insert res
+            qt_sql_after6 """ select * from ${table_name} order by c0; """
+            // 3. check compaction
+            // TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+            def tablets = sql_return_maparray """ show tablets from ${table_name}; """
+            // trigger compactions for all tablets in ${table_name}
+            trigger_and_wait_compaction(table_name, "cumulative")
+            int rowCount = 0
+            for (def tablet in tablets) {
+                def (code, out, err) = curl("GET", tablet.CompactionStatus)
+                logger.info("Show tablets status: code=" + code + ", out=" + 
out + ", err=" + err)
+                assertEquals(code, 0)
+                def tabletJson = parseJson(out.trim())
+                assert tabletJson.rowsets instanceof List
+                for (String rowset in (List<String>) tabletJson.rowsets) {
+                    rowCount += Integer.parseInt(rowset.split(" ")[1])
+                }
+            }
+            logger.info("rowCount: " + rowCount)
+            // 4. check data after compaction matches the pre-compaction result
+            qt_sql_after7 """ select * from ${table_name} order by c0; """
+
+        }
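+        // summary: rowsets written before and after the struct schema changes are merged
+        // by cumulative compaction; the post-compaction select (qt_sql_after7) is expected
+        // to return the same rows as the pre-compaction check (qt_sql_after6).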
+
+    } finally {
+//        for (String tb : tableNames) {
+//            try_sql("DROP TABLE IF EXISTS ${tb}")
+//        }
+    }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
