This is an automated email from the ASF dual-hosted git repository.
eldenmoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 0e1491b5759 [feat](variant) support DESCRIBE variant subcolumns (#54750)
0e1491b5759 is described below
commit 0e1491b57595fcb70946f1f8ca80dc0cb8370d1d
Author: lihangyu <[email protected]>
AuthorDate: Tue Aug 19 14:32:39 2025 +0800
[feat](variant) support DESCRIBE variant subcolumns (#54750)
---
be/src/service/internal_service.cpp | 14 +-
be/src/vec/common/schema_util.cpp | 4 +-
be/src/vec/common/schema_util.h | 1 +
regression-test/data/variant_p0/desc.out | Bin 5908 -> 5720 bytes
regression-test/data/variant_p0/nested.out | Bin 14942 -> 16169 bytes
regression-test/data/variant_p0/nested2.out | Bin 4088 -> 4042 bytes
.../predefine/test_all_prdefine_type_to_sparse.out | Bin 367502 -> 372591 bytes
regression-test/suites/variant_p0/desc.groovy | 503 ++++++++++-----------
regression-test/suites/variant_p0/load.groovy | 6 -
regression-test/suites/variant_p0/nested.groovy | 9 +-
regression-test/suites/variant_p0/nested2.groovy | 287 ++++++------
11 files changed, 402 insertions(+), 422 deletions(-)
diff --git a/be/src/service/internal_service.cpp b/be/src/service/internal_service.cpp
index fb971e2a1a2..4904c501104 100644
--- a/be/src/service/internal_service.cpp
+++ b/be/src/service/internal_service.cpp
@@ -1159,6 +1159,10 @@ void PInternalService::fetch_remote_tablet_schema(google::protobuf::RpcControlle
bool ret = _heavy_work_pool.try_offer([request, response, done]() {
brpc::ClosureGuard closure_guard(done);
Status st = Status::OK();
+        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
+                MemTrackerLimiter::Type::OTHER,
+                fmt::format("InternalService::fetch_remote_tablet_schema"));
+        SCOPED_ATTACH_TASK(mem_tracker);
if (request->is_coordinator()) {
            // Spawn rpc requests to non-coordinator nodes, and finally merge them all
PFetchRemoteSchemaRequest remote_request(*request);
@@ -1244,11 +1248,11 @@ void PInternalService::fetch_remote_tablet_schema(google::protobuf::RpcControlle
LOG(WARNING) << "tablet does not exist, tablet id is "
<< tablet_id;
continue;
}
- // TODO(lihangyu): implement this
- // auto schema = res.value()->merged_tablet_schema();
- // if (schema != nullptr) {
- // tablet_schemas.push_back(schema);
- // }
+            auto tablet = res.value();
+            auto rowsets = tablet->get_snapshot_rowset();
+            auto schema = vectorized::schema_util::VariantCompactionUtil::
+                    calculate_variant_extended_schema(rowsets, tablet->tablet_schema());
+            tablet_schemas.push_back(schema);
}
if (!tablet_schemas.empty()) {
// merge all
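The added lines above follow a common Doris BE pattern: work offered to a thread pool attaches a dedicated memory tracker so allocations made while serving the RPC are attributed to a named tracker rather than to the default one. A minimal sketch of the pattern, with the primitives (MemTrackerLimiter, SCOPED_ATTACH_TASK) and their semantics assumed from this diff alone:

    // Sketch only: mirrors the pattern added to fetch_remote_tablet_schema.
    bool ret = _heavy_work_pool.try_offer([request, response, done]() {
        brpc::ClosureGuard closure_guard(done);
        // Name the tracker after the RPC so its memory usage is easy to attribute.
        std::shared_ptr<MemTrackerLimiter> mem_tracker = MemTrackerLimiter::create_shared(
                MemTrackerLimiter::Type::OTHER,
                fmt::format("InternalService::fetch_remote_tablet_schema"));
        // Attach for the scope of this task; allocations below are tracked.
        SCOPED_ATTACH_TASK(mem_tracker);
        // ... fetch rowsets, build the extended schema, fill the response ...
    });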
diff --git a/be/src/vec/common/schema_util.cpp b/be/src/vec/common/schema_util.cpp
index 943fb01d9f1..2cb18863395 100644
--- a/be/src/vec/common/schema_util.cpp
+++ b/be/src/vec/common/schema_util.cpp
@@ -1280,8 +1280,8 @@ bool generate_sub_column_info(const TabletSchema& schema, int32_t col_unique_id,
return false;
}
-TabletSchemaSPtr calculate_variant_extended_schema(const std::vector<RowsetSharedPtr>& rowsets,
-                                                   const TabletSchemaSPtr& base_schema) {
+TabletSchemaSPtr VariantCompactionUtil::calculate_variant_extended_schema(
+        const std::vector<RowsetSharedPtr>& rowsets, const TabletSchemaSPtr& base_schema) {
if (rowsets.empty()) {
return nullptr;
}
diff --git a/be/src/vec/common/schema_util.h b/be/src/vec/common/schema_util.h
index df0757b6633..e6d9d25b5b6 100644
--- a/be/src/vec/common/schema_util.h
+++ b/be/src/vec/common/schema_util.h
@@ -178,6 +178,7 @@ public:
    static Status get_extended_compaction_schema(const std::vector<RowsetSharedPtr>& rowsets,
                                                 TabletSchemaSPtr& target);
+    // Used to collect all the subcolumn types of a variant column from the given rowsets
    static TabletSchemaSPtr calculate_variant_extended_schema(
            const std::vector<RowsetSharedPtr>& rowsets, const TabletSchemaSPtr& base_schema);
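The declaration above pairs with the new call site in internal_service.cpp. A hedged sketch of the call path, using only types and helpers that appear in this diff (note the .cpp change returns nullptr for empty rowsets, so a null check before collecting the schema is prudent):

    auto tablet = res.value();
    auto rowsets = tablet->get_snapshot_rowset();
    TabletSchemaSPtr schema = vectorized::schema_util::VariantCompactionUtil::
            calculate_variant_extended_schema(rowsets, tablet->tablet_schema());
    if (schema != nullptr) { // nullptr when rowsets is empty
        tablet_schemas.push_back(schema);
    }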
diff --git a/regression-test/data/variant_p0/desc.out b/regression-test/data/variant_p0/desc.out
index 71f804cc25c..9a3be9de833 100644
Binary files a/regression-test/data/variant_p0/desc.out and b/regression-test/data/variant_p0/desc.out differ
diff --git a/regression-test/data/variant_p0/nested.out b/regression-test/data/variant_p0/nested.out
index 4b01760e039..6c3ff0802bc 100644
Binary files a/regression-test/data/variant_p0/nested.out and b/regression-test/data/variant_p0/nested.out differ
diff --git a/regression-test/data/variant_p0/nested2.out b/regression-test/data/variant_p0/nested2.out
index c7790a107de..357f76927e7 100644
Binary files a/regression-test/data/variant_p0/nested2.out and b/regression-test/data/variant_p0/nested2.out differ
diff --git a/regression-test/data/variant_p0/predefine/test_all_prdefine_type_to_sparse.out b/regression-test/data/variant_p0/predefine/test_all_prdefine_type_to_sparse.out
index 531a7c5a6bb..ee534654715 100644
Binary files a/regression-test/data/variant_p0/predefine/test_all_prdefine_type_to_sparse.out and b/regression-test/data/variant_p0/predefine/test_all_prdefine_type_to_sparse.out differ
diff --git a/regression-test/suites/variant_p0/desc.groovy b/regression-test/suites/variant_p0/desc.groovy
index 390ceb06b72..bfa3d8c92e2 100644
--- a/regression-test/suites/variant_p0/desc.groovy
+++ b/regression-test/suites/variant_p0/desc.groovy
@@ -15,263 +15,246 @@
// specific language governing permissions and limitations
// under the License.
-// TODO(lihangyu) fix this
-/// suite("regression_test_variant_desc", "p0"){
-/// // if (isCloudMode()) {
-/// // return
-/// // }
-///
-/// def load_json_data = {table_name, file_name ->
-/// // load the json data
-/// streamLoad {
-/// table "${table_name}"
-///
-/// // set http request header params
-/// set 'read_json_by_line', 'true'
-/// set 'format', 'json'
-/// set 'max_filter_ratio', '0.1'
-/// file file_name // import json file
-/// time 10000 // limit inflight 10s
-///
-///            // if declared a check callback, the default check condition will ignore.
-///            // So you must check all condition
-///
-/// check { result, exception, startTime, endTime ->
-/// if (exception != null) {
-/// throw exception
-/// }
-///                logger.info("Stream load ${file_name} result: ${result}".toString())
-/// def json = parseJson(result)
-/// assertEquals("success", json.Status.toLowerCase())
-///                // assertEquals(json.NumberTotalRows, json.NumberLoadedRows + json.NumberUnselectedRows)
-/// assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
-/// }
-/// }
-/// }
-///
-/// def create_table = { table_name, buckets="1" ->
-/// sql "DROP TABLE IF EXISTS ${table_name}"
-/// sql """
-/// CREATE TABLE IF NOT EXISTS ${table_name} (
-/// k bigint,
-/// v variant
-/// )
-/// DUPLICATE KEY(`k`)
-/// DISTRIBUTED BY HASH(k) BUCKETS ${buckets}
-///            properties("replication_num" = "1", "disable_auto_compaction" = "false");
-/// """
-/// }
-///
-/// def create_table_partition = { table_name, buckets="1" ->
-/// sql "DROP TABLE IF EXISTS ${table_name}"
-/// sql """
-/// CREATE TABLE IF NOT EXISTS ${table_name} (
-/// k bigint,
-/// v variant
-/// )
-/// DUPLICATE KEY(`k`)
-/// PARTITION BY RANGE(k)
-/// (
-/// PARTITION p1 VALUES LESS THAN (3000),
-/// PARTITION p2 VALUES LESS THAN (50000),
-/// PARTITION p3 VALUES LESS THAN (100000)
-/// )
-/// DISTRIBUTED BY HASH(k) BUCKETS ${buckets}
-///            properties("replication_num" = "1", "disable_auto_compaction" = "false");
-/// """
-/// }
-///
-/// def set_be_config = { key, value ->
-/// // String backend_id;
-/// def backendId_to_backendIP = [:]
-/// def backendId_to_backendHttpPort = [:]
-///        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-///
-/// // backend_id = backendId_to_backendIP.keySet()[0]
-/// for (backend_id in backendId_to_backendIP.keySet()) {
-///            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-///            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-/// }
-/// }
-///
-/// try {
-/// // sparse columns
-/// def table_name = "sparse_columns"
-/// create_table table_name
-/// sql """set describe_extend_variant_column = true"""
-///        sql """insert into sparse_columns select 0, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}' as json_str
-///            union all select 0, '{"a": 1123}' as json_str union all select 0, '{"a" : 1234, "xxxx" : "kaana"}' as json_str from numbers("number" = "4096") limit 4096 ;"""
-/// // select for sync rowsets
-/// sql "select * from sparse_columns limit 1"
-/// qt_sql_1 """desc ${table_name}"""
-/// sql "truncate table sparse_columns"
-///        sql """insert into sparse_columns select 0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}' as json_str
-///            union all select 0, '{"a" : 1234, "xxxx" : "kaana", "ddd" : {"aaa" : 123, "mxmxm" : [456, "789"]}}' as json_str from numbers("number" = "4096") limit 4096 ;"""
-/// sql "select * from sparse_columns limit 1"
-/// qt_sql_2 """desc ${table_name}"""
-/// sql "truncate table sparse_columns"
-///
-/// // no sparse columns
-/// table_name = "no_sparse_columns"
-/// create_table.call(table_name, "4")
-/// sql "set enable_two_phase_read_opt = false;"
-///        sql """insert into ${table_name} select 0, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}' as json_str
-///            union all select 0, '{"a": 1123}' as json_str union all select 0, '{"a" : 1234, "xxxx" : "kaana"}' as json_str from numbers("number" = "4096") limit 4096 ;"""
-/// sql "select * from no_sparse_columns limit 1"
-/// qt_sql_3 """desc ${table_name}"""
-/// sql "truncate table ${table_name}"
-///
-/// // partititon
-/// table_name = "partition_data"
-/// create_table_partition.call(table_name, "4")
-/// sql "set enable_two_phase_read_opt = false;"
-///        sql """insert into ${table_name} select 2500, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}' as json_str
-///            union all select 2500, '{"a" : 1234, "xxxx" : "kaana", "ddd" : {"aaa" : 123, "mxmxm" : [456, "789"]}}' as json_str from numbers("number" = "4096") limit 4096 ;"""
-///        sql """insert into ${table_name} select 45000, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}' as json_str
-///            union all select 45000, '{"a": 1123}' as json_str union all select 45000, '{"a" : 1234, "xxxx" : "kaana"}' as json_str from numbers("number" = "4096") limit 4096 ;"""
-///        sql """insert into ${table_name} values(95000, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
-/// sql "select * from partition_data limit 1"
-/// qt_sql_6_1 """desc ${table_name} partition p1"""
-/// qt_sql_6_2 """desc ${table_name} partition p2"""
-/// qt_sql_6_3 """desc ${table_name} partition p3"""
-/// qt_sql_6 """desc ${table_name}"""
-/// sql "truncate table ${table_name}"
-///
-/// // drop partition
-/// table_name = "drop_partition"
-/// create_table_partition.call(table_name, "4")
-/// // insert into partition p1
-///        sql """insert into ${table_name} values(2500, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
-///        // insert into partition p2
-///        sql """insert into ${table_name} values(45000, '{"a": 11245, "xxxx" : "kaana"}')"""
-///        // insert into partition p3
-///        sql """insert into ${table_name} values(95000, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
-/// // drop p1
-/// sql """alter table ${table_name} drop partition p1"""
-/// sql "select * from drop_partition limit 1"
-/// qt_sql_7 """desc ${table_name}"""
-/// qt_sql_7_1 """desc ${table_name} partition p2"""
-/// qt_sql_7_2 """desc ${table_name} partition p3"""
-/// qt_sql_7_3 """desc ${table_name} partition (p2, p3)"""
-/// sql "truncate table ${table_name}"
-///
-/// // more variant
-/// table_name = "more_variant_table"
-/// sql """
-/// CREATE TABLE IF NOT EXISTS ${table_name} (
-/// k bigint,
-/// v1 variant,
-/// v2 variant,
-/// v3 variant
-/// )
-/// DUPLICATE KEY(`k`)
-/// DISTRIBUTED BY HASH(k) BUCKETS 5
-///            properties("replication_num" = "1", "disable_auto_compaction" = "false");
-///        """
-///        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}', '{"a": 11245, "xxxx" : "kaana"}', '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
-/// sql "select * from ${table_name} limit 1"
-/// qt_sql_8 """desc ${table_name}"""
-/// sql "truncate table ${table_name}"
-///
-/// // describe_extend_variant_column = false
-/// sql """set describe_extend_variant_column = false"""
-/// table_name = "no_extend_variant_column"
-/// sql """
-/// CREATE TABLE IF NOT EXISTS ${table_name} (
-/// k bigint,
-/// v variant
-/// )
-/// DUPLICATE KEY(`k`)
-/// DISTRIBUTED BY HASH(k) BUCKETS 5
-///            properties("replication_num" = "1", "disable_auto_compaction" = "false");
-///        """
-///        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}')"""
-/// sql "select * from ${table_name} limit 1"
-/// qt_sql_9 """desc ${table_name}"""
-/// sql """set describe_extend_variant_column = true"""
-/// qt_sql_9_1 """desc ${table_name}"""
-/// sql "truncate table ${table_name}"
-///
-/// // schema change: add varaint
-/// table_name = "schema_change_table"
-/// create_table.call(table_name, "5")
-/// // add, drop columns
-///        sql """INSERT INTO ${table_name} values(0, '{"k1":1, "k2": "hello world", "k3" : [1234], "k4" : 1.10000, "k5" : [[123]]}')"""
-/// sql "select * from ${table_name} limit 1"
-/// sql """set describe_extend_variant_column = true"""
-/// qt_sql_10 """desc ${table_name}"""
-/// // add column
-/// sql "alter table ${table_name} add column v2 variant default null"
-///        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}',
-///            '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}')"""
-/// sql "select * from ${table_name} limit 1"
-/// qt_sql_10_1 """desc ${table_name}"""
-/// // drop cloumn
-/// sql "alter table ${table_name} drop column v2"
-/// qt_sql_10_2 """desc ${table_name}"""
-/// // add column
-/// sql "alter table ${table_name} add column v3 variant default null"
-///        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}',
-///                '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}')"""
-/// sql "select * from ${table_name} limit 1"
-/// qt_sql_10_3 """desc ${table_name}"""
-/// //sql "truncate table ${table_name}"
-///
-/// // varaint column name: chinese name, unicode
-/// table_name = "chinese_table"
-/// sql """
-/// CREATE TABLE IF NOT EXISTS ${table_name} (
-/// k bigint,
-/// v variant
-/// )
-/// DUPLICATE KEY(`k`)
-/// DISTRIBUTED BY HASH(k) BUCKETS 5
-///            properties("replication_num" = "1", "disable_auto_compaction" = "false");
-///        """
-///        sql """ insert into ${table_name} values (0, '{"名字" : "jack", "!@#^&*()": "11111", "金额" : 200, "画像" : {"地址" : "北京", "\\\u4E2C\\\u6587": "unicode"}}')"""
-/// sql """set describe_extend_variant_column = true"""
-/// sql "select * from ${table_name} limit 1"
-/// qt_sql_11 """desc ${table_name}"""
-///
-/// // varaint subcolumn: empty
-/// table_name = "no_subcolumn_table"
-/// sql """
-/// CREATE TABLE IF NOT EXISTS ${table_name} (
-/// k bigint,
-/// v variant
-/// )
-/// DUPLICATE KEY(`k`)
-/// DISTRIBUTED BY HASH(k) BUCKETS 5
-///            properties("replication_num" = "1", "disable_auto_compaction" = "false");
-/// """
-/// sql """ insert into ${table_name} values (0, '{}')"""
-/// sql """ insert into ${table_name} values (0, '100')"""
-/// sql """set describe_extend_variant_column = true"""
-/// sql "select * from ${table_name} limit 1"
-/// qt_sql_12 """desc ${table_name}"""
-///
-///
-/// // desc with large tablets
-/// table_name = "large_tablets"
-/// create_table_partition.call(table_name, "200")
-/// sql """insert into large_tablets values (1, '{"a" : 10}')"""
-/// sql """insert into large_tablets values (3001, '{"b" : 10}')"""
-/// sql """insert into large_tablets values (50001, '{"c" : 10}')"""
-/// sql """insert into large_tablets values (99999, '{"d" : 10}')"""
-/// sql "select * from ${table_name} limit 1"
-/// sql """set max_fetch_remote_schema_tablet_count = 2"""
-/// sql "desc large_tablets"
-/// sql """set max_fetch_remote_schema_tablet_count = 128"""
-/// sql "desc large_tablets"
-/// sql """set max_fetch_remote_schema_tablet_count = 512"""
-/// sql "desc large_tablets"
-/// sql """set max_fetch_remote_schema_tablet_count = 2048"""
-/// qt_sql15 "desc large_tablets"
-///
-/// sql "truncate table large_tablets"
-/// sql "desc large_tablets"
-/// } finally {
-/// // reset flags
-/// }
-/// }
-///
\ No newline at end of file
+suite("regression_test_variant_desc", "p0"){
+ def load_json_data = {table_name, file_name ->
+ // load the json data
+ streamLoad {
+ table "${table_name}"
+
+ // set http request header params
+ set 'read_json_by_line', 'true'
+ set 'format', 'json'
+ set 'max_filter_ratio', '0.1'
+ file file_name // import json file
+ time 10000 // limit inflight 10s
+
+            // if a check callback is declared, the default check condition is ignored,
+            // so you must check all conditions yourself
+
+ check { result, exception, startTime, endTime ->
+ if (exception != null) {
+ throw exception
+ }
+                logger.info("Stream load ${file_name} result: ${result}".toString())
+ def json = parseJson(result)
+ assertEquals("success", json.Status.toLowerCase())
+                // assertEquals(json.NumberTotalRows, json.NumberLoadedRows + json.NumberUnselectedRows)
+ assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
+ }
+ }
+ }
+
+ def create_table = { table_name, buckets="1" ->
+ sql "DROP TABLE IF EXISTS ${table_name}"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v variant
+ )
+ DUPLICATE KEY(`k`)
+ DISTRIBUTED BY HASH(k) BUCKETS ${buckets}
+            properties("replication_num" = "1", "disable_auto_compaction" = "false");
+ """
+ }
+
+ def create_table_partition = { table_name, buckets="1" ->
+ sql "DROP TABLE IF EXISTS ${table_name}"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v variant
+ )
+ DUPLICATE KEY(`k`)
+ PARTITION BY RANGE(k)
+ (
+ PARTITION p1 VALUES LESS THAN (3000),
+ PARTITION p2 VALUES LESS THAN (50000),
+ PARTITION p3 VALUES LESS THAN (100000)
+ )
+ DISTRIBUTED BY HASH(k) BUCKETS ${buckets}
+            properties("replication_num" = "1", "disable_auto_compaction" = "false");
+ """
+ }
+
+ try {
+ sql """set default_variant_max_subcolumns_count = 2"""
+ // sparse columns
+ def table_name = "sparse_columns"
+ create_table table_name
+ sql """set describe_extend_variant_column = true"""
+        sql """insert into sparse_columns select 0, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}' as json_str
+            union all select 0, '{"a": 1123}' as json_str union all select 0, '{"a" : 1234, "xxxx" : "kaana"}' as json_str from numbers("number" = "4096") limit 4096 ;"""
+ // select for sync rowsets
+ sql "select * from sparse_columns limit 1"
+ qt_sql_1 """desc ${table_name}"""
+ sql "truncate table sparse_columns"
+        sql """insert into sparse_columns select 0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}' as json_str
+            union all select 0, '{"a" : 1234, "xxxx" : "kaana", "ddd" : {"aaa" : 123, "mxmxm" : [456, "789"]}}' as json_str from numbers("number" = "4096") limit 4096 ;"""
+ sql "select * from sparse_columns limit 1"
+ qt_sql_2 """desc ${table_name}"""
+ sql "truncate table sparse_columns"
+
+ sql """set default_variant_max_subcolumns_count = 0"""
+ // no sparse columns
+ table_name = "no_sparse_columns"
+ create_table.call(table_name, "4")
+ sql "set enable_two_phase_read_opt = false;"
+        sql """insert into ${table_name} select 0, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}' as json_str
+            union all select 0, '{"a": 1123}' as json_str union all select 0, '{"a" : 1234, "xxxx" : "kaana"}' as json_str from numbers("number" = "4096") limit 4096 ;"""
+ sql "select * from no_sparse_columns limit 1"
+ qt_sql_3 """desc ${table_name}"""
+ sql "truncate table ${table_name}"
+
+        // partition
+ table_name = "partition_data"
+ create_table_partition.call(table_name, "4")
+ sql "set enable_two_phase_read_opt = false;"
+        sql """insert into ${table_name} select 2500, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}' as json_str
+            union all select 2500, '{"a" : 1234, "xxxx" : "kaana", "ddd" : {"aaa" : 123, "mxmxm" : [456, "789"]}}' as json_str from numbers("number" = "4096") limit 4096 ;"""
+        sql """insert into ${table_name} select 45000, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}' as json_str
+            union all select 45000, '{"a": 1123}' as json_str union all select 45000, '{"a" : 1234, "xxxx" : "kaana"}' as json_str from numbers("number" = "4096") limit 4096 ;"""
+        sql """insert into ${table_name} values(95000, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
+ sql "select * from partition_data limit 1"
+ qt_sql_6_1 """desc ${table_name} partition p1"""
+ qt_sql_6_2 """desc ${table_name} partition p2"""
+ qt_sql_6_3 """desc ${table_name} partition p3"""
+ qt_sql_6 """desc ${table_name}"""
+ sql "truncate table ${table_name}"
+
+ // drop partition
+ table_name = "drop_partition"
+ create_table_partition.call(table_name, "4")
+ // insert into partition p1
+        sql """insert into ${table_name} values(2500, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
+        // insert into partition p2
+        sql """insert into ${table_name} values(45000, '{"a": 11245, "xxxx" : "kaana"}')"""
+        // insert into partition p3
+        sql """insert into ${table_name} values(95000, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
+ // drop p1
+ sql """alter table ${table_name} drop partition p1"""
+ sql "select * from drop_partition limit 1"
+ qt_sql_7 """desc ${table_name}"""
+ qt_sql_7_1 """desc ${table_name} partition p2"""
+ qt_sql_7_2 """desc ${table_name} partition p3"""
+ qt_sql_7_3 """desc ${table_name} partition (p2, p3)"""
+ sql "truncate table ${table_name}"
+
+ // more variant
+ table_name = "more_variant_table"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v1 variant,
+ v2 variant,
+ v3 variant
+ )
+ DUPLICATE KEY(`k`)
+ DISTRIBUTED BY HASH(k) BUCKETS 5
+            properties("replication_num" = "1", "disable_auto_compaction" = "false");
+        """
+        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}', '{"a": 11245, "xxxx" : "kaana"}', '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}}')"""
+ sql "select * from ${table_name} limit 1"
+ qt_sql_8 """desc ${table_name}"""
+ sql "truncate table ${table_name}"
+
+ // describe_extend_variant_column = false
+ sql """set describe_extend_variant_column = false"""
+ table_name = "no_extend_variant_column"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v variant
+ )
+ DUPLICATE KEY(`k`)
+ DISTRIBUTED BY HASH(k) BUCKETS 5
+            properties("replication_num" = "1", "disable_auto_compaction" = "false");
+        """
+        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}')"""
+ sql "select * from ${table_name} limit 1"
+ qt_sql_9 """desc ${table_name}"""
+ sql """set describe_extend_variant_column = true"""
+ qt_sql_9_1 """desc ${table_name}"""
+ sql "truncate table ${table_name}"
+
+        // schema change: add variant
+ table_name = "schema_change_table"
+ create_table.call(table_name, "5")
+ // add, drop columns
+        sql """INSERT INTO ${table_name} values(0, '{"k1":1, "k2": "hello world", "k3" : [1234], "k4" : 1.10000, "k5" : [[123]]}')"""
+ sql "select * from ${table_name} limit 1"
+ sql """set describe_extend_variant_column = true"""
+ qt_sql_10 """desc ${table_name}"""
+ // add column
+ sql "alter table ${table_name} add column v2 variant default null"
+        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}',
+            '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}')"""
+ sql "select * from ${table_name} limit 1"
+ qt_sql_10_1 """desc ${table_name}"""
+        // drop column
+ sql "alter table ${table_name} drop column v2"
+ qt_sql_10_2 """desc ${table_name}"""
+ // add column
+ sql "alter table ${table_name} add column v3 variant default null"
+        sql """ insert into ${table_name} values (0, '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}',
+                '{"a": 1123, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : null, "e" : 7.111}, "zzz" : null, "oooo" : {"akakaka" : null, "xxxx" : {"xxx" : 123}}}')"""
+ sql "select * from ${table_name} limit 1"
+ qt_sql_10_3 """desc ${table_name}"""
+ //sql "truncate table ${table_name}"
+
+        // variant column name: chinese name, unicode
+ table_name = "chinese_table"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v variant
+ )
+ DUPLICATE KEY(`k`)
+ DISTRIBUTED BY HASH(k) BUCKETS 5
+            properties("replication_num" = "1", "disable_auto_compaction" = "false");
+        """
+        sql """ insert into ${table_name} values (0, '{"名字" : "jack", "!@#^&*()": "11111", "金额" : 200, "画像" : {"地址" : "北京", "\\\u4E2C\\\u6587": "unicode"}}')"""
+ sql """set describe_extend_variant_column = true"""
+ sql "select * from ${table_name} limit 1"
+ qt_sql_11 """desc ${table_name}"""
+
+        // variant subcolumn: empty
+ table_name = "no_subcolumn_table"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v variant
+ )
+ DUPLICATE KEY(`k`)
+ DISTRIBUTED BY HASH(k) BUCKETS 5
+            properties("replication_num" = "1", "disable_auto_compaction" = "false");
+ """
+ sql """ insert into ${table_name} values (0, '{}')"""
+ sql """ insert into ${table_name} values (0, '100')"""
+ sql """set describe_extend_variant_column = true"""
+ sql "select * from ${table_name} limit 1"
+ qt_sql_12 """desc ${table_name}"""
+
+
+ // desc with large tablets
+ table_name = "large_tablets"
+ create_table_partition.call(table_name, "200")
+ sql """insert into large_tablets values (1, '{"a" : 10}')"""
+ sql """insert into large_tablets values (3001, '{"b" : 10}')"""
+ sql """insert into large_tablets values (50001, '{"c" : 10}')"""
+ sql """insert into large_tablets values (99999, '{"d" : 10}')"""
+ sql "select * from ${table_name} limit 1"
+ sql """set max_fetch_remote_schema_tablet_count = 2"""
+ sql "desc large_tablets"
+ sql """set max_fetch_remote_schema_tablet_count = 128"""
+ sql "desc large_tablets"
+ sql """set max_fetch_remote_schema_tablet_count = 512"""
+ sql "desc large_tablets"
+ sql """set max_fetch_remote_schema_tablet_count = 2048"""
+ qt_sql15 "desc large_tablets"
+
+ sql "truncate table large_tablets"
+ sql "desc large_tablets"
+ } finally {
+ // reset flags
+ }
+}
diff --git a/regression-test/suites/variant_p0/load.groovy b/regression-test/suites/variant_p0/load.groovy
index 5bec42b3b5e..96a33f9bca0 100644
--- a/regression-test/suites/variant_p0/load.groovy
+++ b/regression-test/suites/variant_p0/load.groovy
@@ -443,12 +443,6 @@ suite("regression_test_variant", "p0"){
sql """insert into var_as_key values(2, '{"b" : 11}')"""
qt_sql "select * from var_as_key order by k"
- // TODO(lihangyu): fix this test
- // test {
-    //     sql """select * from ghdata where cast(v['actor']['url'] as ipv4) = '127.0.0.1'"""
- // exception("Invalid type for variant column: 36")
- // }
-
if (!isCloudMode()) {
test {
sql """
diff --git a/regression-test/suites/variant_p0/nested.groovy b/regression-test/suites/variant_p0/nested.groovy
index a0087e07374..a0beed45c69 100644
--- a/regression-test/suites/variant_p0/nested.groovy
+++ b/regression-test/suites/variant_p0/nested.groovy
@@ -28,7 +28,7 @@ suite("regression_test_variant_nested", "p0"){
sql """
CREATE TABLE IF NOT EXISTS ${table_name} (
k bigint,
- v variant
+                v variant <properties("variant_max_subcolumns_count" = "3")>
)
DUPLICATE KEY(`k`)
DISTRIBUTED BY HASH(k) BUCKETS 4
@@ -69,7 +69,6 @@ suite("regression_test_variant_nested", "p0"){
"""
sql """select * from var_nested limit 1"""
sql """set describe_extend_variant_column = true"""
- // TODO(lihangyu) fix this
qt_sql """DESC var_nested"""
qt_sql """
select * from var_nested order by k limit 101
@@ -111,7 +110,7 @@ parallel_pipeline_task_num=7,profile_level=1,enable_pipeline_engine=true,enable_
sql """
CREATE TABLE IF NOT EXISTS var_nested2 (
k bigint,
- v variant
+                v variant <properties("variant_max_subcolumns_count" = "3")>
)
UNIQUE KEY(`k`)
DISTRIBUTED BY HASH(k) BUCKETS 1
@@ -133,7 +132,7 @@ where phone_numbers['type'] = 'GSM' OR phone_numbers['type'] = 'HOME' and phone_
sql """
CREATE TABLE IF NOT EXISTS var_nested_array_agg(
k bigint,
- v variant
+                v variant <properties("variant_max_subcolumns_count" = "3")>
)
UNIQUE KEY(`k`)
DISTRIBUTED BY HASH(k) BUCKETS 1
@@ -150,7 +149,7 @@ where phone_numbers['type'] = 'GSM' OR phone_numbers['type'] = 'HOME' and phone_
sql """
        CREATE TABLE IF NOT EXISTS var_nested_explode_variant_with_abnomal(
k bigint,
- v variant
+                v variant <properties("variant_max_subcolumns_count" = "3")>
)
UNIQUE KEY(`k`)
DISTRIBUTED BY HASH(k) BUCKETS 1
diff --git a/regression-test/suites/variant_p0/nested2.groovy b/regression-test/suites/variant_p0/nested2.groovy
index 67e351410c2..853234c7286 100644
--- a/regression-test/suites/variant_p0/nested2.groovy
+++ b/regression-test/suites/variant_p0/nested2.groovy
@@ -15,147 +15,146 @@
// specific language governing permissions and limitations
// under the License.
-// TODO(lihangyu): need to be fixed
-// // this test is used to test the type conflict of nested array
-// suite("variant_nested_type_conflict", "p0"){
-//
-// try {
-//
-// def table_name = "var_nested_type_conflict"
-// sql "DROP TABLE IF EXISTS ${table_name}"
-// sql """set describe_extend_variant_column = true"""
-//
-// sql """ set disable_variant_flatten_nested = false """
-// sql """
-// CREATE TABLE IF NOT EXISTS ${table_name} (
-// k bigint,
-// v variant
-// )
-// DUPLICATE KEY(`k`)
-//             DISTRIBUTED BY HASH(k) BUCKETS 1 -- 1 bucket make really compaction in conflict case
-//             properties("replication_num" = "1", "disable_auto_compaction" = "false", "variant_enable_flatten_nested" = "true");
-// """
-// def sql_select_batch = {
-// qt_sql_0 """select * from ${table_name} order by k"""
-//
-//             qt_sql_1 """select v['nested']['a'] from ${table_name} order by k"""
-//             qt_sql_2 """select v['nested']['b'] from ${table_name} order by k"""
-//             qt_sql_3 """select v['nested']['c'] from ${table_name} order by k"""
-//
-// qt_sql_4 """select v['nested'] from ${table_name} order by k"""
-// }
-//
-// def sql_test_cast_to_array = {
-// // test cast to array<int>
-//             qt_sql_8 """select cast(v['nested']['a'] as array<int>), size(cast(v['nested']['a'] as array<int>)) from ${table_name} order by k"""
-//             qt_sql_9 """select cast(v['nested']['b'] as array<int>), size(cast(v['nested']['b'] as array<int>)) from ${table_name} order by k"""
-//             qt_sql_10 """select cast(v['nested']['c'] as array<int>), size(cast(v['nested']['c'] as array<int>)) from ${table_name} order by k"""
-//
-//             // test cast to array<string>
-//             qt_sql_11 """select cast(v['nested']['a'] as array<string>), size(cast(v['nested']['a'] as array<string>)) from ${table_name} order by k"""
-//             qt_sql_12 """select cast(v['nested']['b'] as array<string>), size(cast(v['nested']['b'] as array<string>)) from ${table_name} order by k"""
-//             qt_sql_13 """select cast(v['nested']['c'] as array<string>), size(cast(v['nested']['c'] as array<string>)) from ${table_name} order by k"""
-//
-//             // test cast to array<double>
-//             qt_sql_14 """select cast(v['nested']['a'] as array<double>), size(cast(v['nested']['a'] as array<double>)) from ${table_name} order by k"""
-//             qt_sql_15 """select cast(v['nested']['b'] as array<double>), size(cast(v['nested']['b'] as array<double>)) from ${table_name} order by k"""
-//             qt_sql_16 """select cast(v['nested']['c'] as array<double>), size(cast(v['nested']['c'] as array<double>)) from ${table_name} order by k"""
-//
-// }
-// // insert Nested array in Nested array which is not supported
-// test {
-// sql """
-//                 insert into ${table_name} values (1, '{"nested": [{"a": [1,2,3]}]}');
-//             """
-//             exception "Nesting of array in Nested array within variant subcolumns is currently not supported."
-// }
-// // insert batch different structure in same path
-// test {
-// sql """
-//                 insert into ${table_name} values (3, '{"nested": [{"a": 2.5, "b": "123.1"}]}'), (4, '{"nested": {"a": 2.5, "b": "123.1"}}');
-// """
-// exception "Ambiguous paths"
-// }
-// /// insert a array of object for a, b, c
-// // insert type conflict in multiple rows
-// sql """
-//             insert into ${table_name} values (1, '{"nested": [{"a": 1, "c": 1.1}, {"b": "1"}]}');
-// """
-//
-//         // for cloud we should select first and then desc for syncing rowset to get latest schema
-// sql """
-// select * from ${table_name} order by k limit 1;
-// """
-// qt_sql_desc_1 """
-// desc ${table_name};
-// """
-// // now select for a, b, c
-// sql_select_batch()
-// sql_test_cast_to_array()
-// /// insert a, b type changed to double
-// sql """
-//             insert into ${table_name} values (2, '{"nested": [{"a": 2.5, "b": 123.1}]}');
-//         """
-//         // for cloud we should select first and then desc for syncing rowset to get latest schema
-// sql """
-// select * from ${table_name} order by k limit 1;
-// """
-// qt_sql_desc_2 """
-// desc ${table_name};
-// """
-// // now select for a, b, c
-// sql_select_batch()
-// sql_test_cast_to_array()
-//
-// // trigger and wait compaction
-// trigger_and_wait_compaction("${table_name}", "full")
-//
-// // now select for a, b, c
-// sql_select_batch()
-// sql_test_cast_to_array()
-//
-// sql """ truncate table ${table_name} """
-//
-//
-// // insert type conflict in one row
-// sql """
-//             insert into ${table_name} values (1, '{"nested": [{"a": 1, "b": 1.1}, {"a": "1", "b": "1", "c": "1"}]}');
-//         """
-//         // for cloud we should select first and then desc for syncing rowset to get latest schema
-// sql """
-// select * from ${table_name} order by k limit 1;
-// """
-// qt_sql_desc_4 """
-// desc ${table_name};
-// """
-// // now select for a, b, c
-// sql_select_batch()
-// sql_test_cast_to_array()
-//
-// // insert c type changed to double
-// sql """
-//             insert into ${table_name} values (2, '{"nested": [{"a": 1, "c": 1.1}]}');
-//         """
-//         // for cloud we should select first and then desc for syncing rowset to get latest schema
-// sql """
-// select * from ${table_name} order by k limit 1;
-// """
-// qt_sql_desc_5 """
-// desc ${table_name};
-// """
-// // now select for a, b, c
-// sql_select_batch()
-// sql_test_cast_to_array()
-//
-// // trigger and wait compaction
-// trigger_and_wait_compaction("${table_name}", "full")
-//
-// // now select for a, b, c
-// sql_select_batch()
-// sql_test_cast_to_array()
-//
-// } finally {
-// }
-//
-// }
-//
\ No newline at end of file
+// this test exercises type conflicts in nested arrays
+suite("variant_nested_type_conflict", "p0"){
+
+ try {
+
+ def table_name = "var_nested_type_conflict"
+ sql "DROP TABLE IF EXISTS ${table_name}"
+ sql """set describe_extend_variant_column = true"""
+
+ sql """ set enable_variant_flatten_nested = true """
+ sql "set default_variant_max_subcolumns_count = 0"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${table_name} (
+ k bigint,
+ v variant
+ )
+ DUPLICATE KEY(`k`)
+            DISTRIBUTED BY HASH(k) BUCKETS 1 -- 1 bucket to reliably trigger compaction in the conflict case
+            properties("replication_num" = "1", "disable_auto_compaction" = "false", "variant_enable_flatten_nested" = "true");
+ """
+ def sql_select_batch = {
+ qt_sql_0 """select * from ${table_name} order by k"""
+
+            qt_sql_1 """select v['nested']['a'] from ${table_name} order by k"""
+            qt_sql_2 """select v['nested']['b'] from ${table_name} order by k"""
+            qt_sql_3 """select v['nested']['c'] from ${table_name} order by k"""
+
+ qt_sql_4 """select v['nested'] from ${table_name} order by k"""
+ }
+
+ def sql_test_cast_to_array = {
+ // test cast to array<int>
+            qt_sql_8 """select cast(v['nested']['a'] as array<int>), size(cast(v['nested']['a'] as array<int>)) from ${table_name} order by k"""
+            qt_sql_9 """select cast(v['nested']['b'] as array<int>), size(cast(v['nested']['b'] as array<int>)) from ${table_name} order by k"""
+            qt_sql_10 """select cast(v['nested']['c'] as array<int>), size(cast(v['nested']['c'] as array<int>)) from ${table_name} order by k"""
+
+            // test cast to array<string>
+            qt_sql_11 """select cast(v['nested']['a'] as array<string>), size(cast(v['nested']['a'] as array<string>)) from ${table_name} order by k"""
+            qt_sql_12 """select cast(v['nested']['b'] as array<string>), size(cast(v['nested']['b'] as array<string>)) from ${table_name} order by k"""
+            qt_sql_13 """select cast(v['nested']['c'] as array<string>), size(cast(v['nested']['c'] as array<string>)) from ${table_name} order by k"""
+
+            // test cast to array<double>
+            qt_sql_14 """select cast(v['nested']['a'] as array<double>), size(cast(v['nested']['a'] as array<double>)) from ${table_name} order by k"""
+            qt_sql_15 """select cast(v['nested']['b'] as array<double>), size(cast(v['nested']['b'] as array<double>)) from ${table_name} order by k"""
+            qt_sql_16 """select cast(v['nested']['c'] as array<double>), size(cast(v['nested']['c'] as array<double>)) from ${table_name} order by k"""
+
+ }
+ // insert Nested array in Nested array which is not supported
+ test {
+ sql """
+                insert into ${table_name} values (1, '{"nested": [{"a": [1,2,3]}]}');
+            """
+            exception "Nesting of array in Nested array within variant subcolumns is currently not supported."
+ }
+        // insert a batch with different structures at the same path
+ test {
+ sql """
+                insert into ${table_name} values (3, '{"nested": [{"a": 2.5, "b": "123.1"}]}'), (4, '{"nested": {"a": 2.5, "b": "123.1"}}');
+ """
+ exception "Ambiguous paths"
+ }
+ /// insert a array of object for a, b, c
+        /// insert an array of objects for a, b, c
+ sql """
+            insert into ${table_name} values (1, '{"nested": [{"a": 1, "c": 1.1}, {"b": "1"}]}');
+        """
+
+        // for cloud we should select first and then desc for syncing rowset to get latest schema
+ sql """
+ select * from ${table_name} order by k limit 1;
+ """
+ qt_sql_desc_1 """
+ desc ${table_name};
+ """
+ // now select for a, b, c
+ sql_select_batch()
+ sql_test_cast_to_array()
+ /// insert a, b type changed to double
+ sql """
+            insert into ${table_name} values (2, '{"nested": [{"a": 2.5, "b": 123.1}]}');
+        """
+        // for cloud we should select first and then desc for syncing rowset to get latest schema
+ sql """
+ select * from ${table_name} order by k limit 1;
+ """
+ qt_sql_desc_2 """
+ desc ${table_name};
+ """
+ // now select for a, b, c
+ sql_select_batch()
+ sql_test_cast_to_array()
+
+ // trigger and wait compaction
+ trigger_and_wait_compaction("${table_name}", "full")
+
+ // now select for a, b, c
+ sql_select_batch()
+ sql_test_cast_to_array()
+
+ sql """ truncate table ${table_name} """
+
+
+ // insert type conflict in one row
+ sql """
+            insert into ${table_name} values (1, '{"nested": [{"a": 1, "b": 1.1}, {"a": "1", "b": "1", "c": "1"}]}');
+        """
+        // for cloud we should select first and then desc for syncing rowset to get latest schema
+ sql """
+ select * from ${table_name} order by k limit 1;
+ """
+ qt_sql_desc_4 """
+ desc ${table_name};
+ """
+ // now select for a, b, c
+ sql_select_batch()
+ sql_test_cast_to_array()
+
+ // insert c type changed to double
+ sql """
+            insert into ${table_name} values (2, '{"nested": [{"a": 1, "c": 1.1}]}');
+        """
+        // for cloud we should select first and then desc for syncing rowset to get latest schema
+ sql """
+ select * from ${table_name} order by k limit 1;
+ """
+ qt_sql_desc_5 """
+ desc ${table_name};
+ """
+ // now select for a, b, c
+ sql_select_batch()
+ sql_test_cast_to_array()
+
+ // trigger and wait compaction
+ trigger_and_wait_compaction("${table_name}", "full")
+
+ // now select for a, b, c
+ sql_select_batch()
+ sql_test_cast_to_array()
+
+ } finally {
+ }
+
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]