This is an automated email from the ASF dual-hosted git repository.

jiacai2050 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/horaedb.git


The following commit(s) were added to refs/heads/main by this push:
     new 2c9cd1a1 fix: disable layered memtable in overwrite mode (#1533)
2c9cd1a1 is described below

commit 2c9cd1a1ebbd5f478cc364de29ecebf4ac492329
Author: kamille <[email protected]>
AuthorDate: Thu Sep 5 14:03:43 2024 +0800

    fix: disable layered memtable in overwrite mode (#1533)
    
    ## Rationale
    The layered memtable is currently designed only for append-mode tables,
    so it must not be used with overwrite-mode tables.
    
    ## Detailed Changes
    - Actually apply the default values defined in the config.
    - Add an `enable` field to switch the layered memtable on and off.
    - Add checks that reject invalid options during table create/alter.
    - Add related integration test cases.
    
    ## Test Plan
    Tested manually.
    
    The following cases are covered:
    
    Checking and intercepting invalid table options during table create/alter
    (a sketch follows this list):
    - layered memtable enabled, but the mutable switch threshold is 0
    - layered memtable enabled for an overwrite mode table
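
    A minimal sketch of that check, assuming the names visible in the error
    messages below (`LayeredMemtableOptions`, `enable`,
    `mutable_segment_switch_threshold`); the helper name and the `needs_dedup`
    flag are illustrative placeholders, not the exact code in this commit:

    ```rust
    struct ReadableSize(u64);

    struct LayeredMemtableOptions {
        enable: bool,
        mutable_segment_switch_threshold: ReadableSize,
    }

    /// Reject option combinations the layered memtable cannot support
    /// (hypothetical helper; `needs_dedup` is true for update_mode='OVERWRITE').
    fn check_layered_memtable_opts(
        opts: &LayeredMemtableOptions,
        needs_dedup: bool,
    ) -> Result<(), String> {
        if opts.enable && opts.mutable_segment_switch_threshold.0 == 0 {
            // Enabled, but the mutable segment switch threshold is 0.
            return Err("layered memtable is enabled but mutable_switch_threshold is 0".to_string());
        }
        if opts.enable && needs_dedup {
            // Layered memtable only supports append mode (non-dedup) tables.
            return Err("layered memtable is enabled for table needing dedup".to_string());
        }
        Ok(())
    }
    ```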
    
    Default value of the new table option `layered_enable` when it is not
    found in pb (see the sketch after this list):
    - false, when the whole `layered_memtable_options` does not exist
    - false, when `layered_memtable_options` exists and
    `mutable_segment_switch_threshold` == 0
    - true, when `layered_memtable_options` exists and
    `mutable_segment_switch_threshold` > 0
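
    A rough sketch of that fallback, using a hypothetical mirror of the pb
    message (the real definition lives in horaedbproto and may differ):

    ```rust
    /// Hypothetical stand-in for the decoded pb message; only the
    /// threshold matters for deriving the default.
    struct LayeredMemtableOptionsPb {
        mutable_segment_switch_threshold: u64,
    }

    /// Derive `layered_enable` for table options persisted before the field existed.
    fn default_layered_enable(opts: Option<&LayeredMemtableOptionsPb>) -> bool {
        match opts {
            // No layered memtable options at all: keep the feature off.
            None => false,
            // Present: treat a non-zero switch threshold as previously enabled.
            Some(o) => o.mutable_segment_switch_threshold > 0,
        }
    }
    ```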
---
 Cargo.lock                                         |  2 +-
 Cargo.toml                                         |  2 +-
 .../cases/common/dml/case_sensitive.result         |  4 +-
 .../cases/common/show/show_create_table.result     |  6 +-
 .../cases/env/cluster/ddl/alter_table.result       |  4 +-
 .../cases/env/cluster/ddl/create_tables.result     | 22 +++---
 .../cases/env/cluster/ddl/partition_table.result   | 12 ++--
 .../cases/env/local/ddl/alter_table.result         | 12 +++-
 .../cases/env/local/ddl/alter_table.sql            |  8 ++-
 .../cases/env/local/ddl/create_tables.result       | 66 +++++++++++++----
 .../cases/env/local/ddl/create_tables.sql          | 15 +++-
 .../env/local/ddl/sampling-primary-key.result      |  4 +-
 src/analytic_engine/src/instance/alter.rs          |  8 ++-
 src/analytic_engine/src/instance/create.rs         |  6 +-
 src/analytic_engine/src/instance/engine.rs         |  9 ++-
 src/analytic_engine/src/lib.rs                     |  5 --
 src/analytic_engine/src/memtable/mod.rs            | 46 +++++++-----
 src/analytic_engine/src/table/data.rs              | 44 ++++++++----
 src/analytic_engine/src/table_options.rs           | 84 ++++++++++++++++++----
 src/common_types/src/lib.rs                        |  3 +-
 src/system_catalog/src/sys_catalog_table.rs        |  2 +-
 src/table_engine/src/engine.rs                     | 30 +++++++-
 22 files changed, 294 insertions(+), 100 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 0a2d3a0f..94c22e7a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3211,7 +3211,7 @@ dependencies = [
 [[package]]
 name = "horaedbproto"
 version = "2.0.0"
-source = 
"git+https://github.com/apache/incubator-horaedb-proto.git?rev=19ece8f771fc0b3e8e734072cc3d8040de6c74cb#19ece8f771fc0b3e8e734072cc3d8040de6c74cb"
+source = 
"git+https://github.com/apache/incubator-horaedb-proto.git?rev=a5874d9fedee32ab1292252c4eb6defc4f6e245a#a5874d9fedee32ab1292252c4eb6defc4f6e245a"
 dependencies = [
  "prost 0.11.8",
  "protoc-bin-vendored",
diff --git a/Cargo.toml b/Cargo.toml
index c0e62ebf..2bc85e70 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -101,7 +101,7 @@ thiserror = "1"
 bytes_ext = { path = "src/components/bytes_ext" }
 catalog = { path = "src/catalog" }
 catalog_impls = { path = "src/catalog_impls" }
-horaedbproto = { git = 
"https://github.com/apache/incubator-horaedb-proto.git", rev = 
"19ece8f771fc0b3e8e734072cc3d8040de6c74cb" }
+horaedbproto = { git = 
"https://github.com/apache/incubator-horaedb-proto.git", rev = 
"a5874d9fedee32ab1292252c4eb6defc4f6e245a" }
 codec = { path = "src/components/codec" }
 chrono = "0.4"
 clap = { version = "4.5.1", features = ["derive"] }
diff --git a/integration_tests/cases/common/dml/case_sensitive.result 
b/integration_tests/cases/common/dml/case_sensitive.result
index c244d45a..414523e8 100644
--- a/integration_tests/cases/common/dml/case_sensitive.result
+++ b/integration_tests/cases/common/dml/case_sensitive.result
@@ -74,7 +74,7 @@ Failed to execute query, err: Server(ServerError { code: 500, 
msg: "Failed to cr
 SHOW CREATE TABLE case_SENSITIVE_table1;
 
 Table,Create Table,
-String("case_SENSITIVE_table1"),String("CREATE TABLE `case_SENSITIVE_table1` 
(`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `VALUE1` double, PRIMARY 
KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("case_SENSITIVE_table1"),String("CREATE TABLE `case_SENSITIVE_table1` 
(`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `VALUE1` double, PRIMARY 
KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_m [...]
 
 
 SHOW CREATE TABLE CASE_SENSITIVE_TABLE1;
@@ -84,7 +84,7 @@ Failed to execute query, err: Server(ServerError { code: 500, 
msg: "Failed to cr
 SHOW CREATE TABLE `case_SENSITIVE_table1`;
 
 Table,Create Table,
-String("case_SENSITIVE_table1"),String("CREATE TABLE `case_SENSITIVE_table1` 
(`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `VALUE1` double, PRIMARY 
KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("case_SENSITIVE_table1"),String("CREATE TABLE `case_SENSITIVE_table1` 
(`tsid` uint64 NOT NULL, `ts` timestamp NOT NULL, `VALUE1` double, PRIMARY 
KEY(tsid,ts), TIMESTAMP KEY(ts)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_m [...]
 
 
 SHOW CREATE TABLE `CASE_SENSITIVE_TABLE1`;
diff --git a/integration_tests/cases/common/show/show_create_table.result 
b/integration_tests/cases/common/show/show_create_table.result
index 4e42df33..efac0751 100644
--- a/integration_tests/cases/common/show/show_create_table.result
+++ b/integration_tests/cases/common/show/show_create_table.result
@@ -35,7 +35,7 @@ affected_rows: 0
 SHOW CREATE TABLE `06_show_a`;
 
 Table,Create Table,
-String("06_show_a"),String("CREATE TABLE `06_show_a` (`tsid` uint64 NOT NULL, 
`t` timestamp NOT NULL, `a` bigint, `b` int DEFAULT 3, `c` string DEFAULT 'x', 
`d` smallint, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("06_show_a"),String("CREATE TABLE `06_show_a` (`tsid` uint64 NOT NULL, 
`t` timestamp NOT NULL, `a` bigint, `b` int DEFAULT 3, `c` string DEFAULT 'x', 
`d` smallint, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='A [...]
 
 
 CREATE TABLE `06_show_b` (a bigint, b int null default null, c string, d 
smallint null, t timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic;
@@ -45,7 +45,7 @@ affected_rows: 0
 SHOW CREATE TABLE `06_show_b`;
 
 Table,Create Table,
-String("06_show_b"),String("CREATE TABLE `06_show_b` (`tsid` uint64 NOT NULL, 
`t` timestamp NOT NULL, `a` bigint, `b` int DEFAULT NULL, `c` string, `d` 
smallint, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("06_show_b"),String("CREATE TABLE `06_show_b` (`tsid` uint64 NOT NULL, 
`t` timestamp NOT NULL, `a` bigint, `b` int DEFAULT NULL, `c` string, `d` 
smallint, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', ttl 
[...]
 
 
 CREATE TABLE `06_show_c` (a int, t timestamp NOT NULL, TIMESTAMP KEY(t)) 
ENGINE = Analytic;
@@ -55,7 +55,7 @@ affected_rows: 0
 SHOW CREATE TABLE `06_show_c`;
 
 Table,Create Table,
-String("06_show_c"),String("CREATE TABLE `06_show_c` (`tsid` uint64 NOT NULL, 
`t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) 
ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("06_show_c"),String("CREATE TABLE `06_show_c` (`tsid` uint64 NOT NULL, 
`t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) 
ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size=' [...]
 
 
 DROP TABLE `06_show_a`;
diff --git a/integration_tests/cases/env/cluster/ddl/alter_table.result 
b/integration_tests/cases/env/cluster/ddl/alter_table.result
index ef18ca11..9a9eef72 100644
--- a/integration_tests/cases/env/cluster/ddl/alter_table.result
+++ b/integration_tests/cases/env/cluster/ddl/alter_table.result
@@ -102,7 +102,7 @@ affected_rows: 0
 show create table 05_alter_table_t1;
 
 Table,Create Table,
-String("05_alter_table_t1"),String("CREATE TABLE `05_alter_table_t1` (`tsid` 
uint64 NOT NULL, `t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='314572800')"),
+String("05_alter_table_t1"),String("CREATE TABLE `05_alter_table_t1` (`tsid` 
uint64 NOT NULL, `t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', wri [...]
 
 
 drop table 05_alter_table_t1;
@@ -120,7 +120,7 @@ affected_rows: 0
 show create table 05_alter_table_t1;
 
 Table,Create Table,
-String("05_alter_table_t1"),String("CREATE TABLE `05_alter_table_t1` (`tsid` 
uint64 NOT NULL, `t` timestamp NOT NULL, `sid` uint64 NOT NULL, `a` int, 
PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='10d', update_mode='OVERWRITE', write_buffer_size='314572800')"),
+String("05_alter_table_t1"),String("CREATE TABLE `05_alter_table_t1` (`tsid` 
uint64 NOT NULL, `t` timestamp NOT NULL, `sid` uint64 NOT NULL, `a` int, 
PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='10d', upda [...]
 
 
 drop table 05_alter_table_t1;
diff --git a/integration_tests/cases/env/cluster/ddl/create_tables.result 
b/integration_tests/cases/env/cluster/ddl/create_tables.result
index cfd536d5..5fb213e8 100644
--- a/integration_tests/cases/env/cluster/ddl/create_tables.result
+++ b/integration_tests/cases/env/cluster/ddl/create_tables.result
@@ -114,7 +114,7 @@ 
String("a"),String("int"),Boolean(false),Boolean(true),Boolean(false),Boolean(fa
 show create table `05_create_tables_t4`;
 
 Table,Create Table,
-String("05_create_tables_t4"),String("CREATE TABLE `05_create_tables_t4` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='33554432')"),
+String("05_create_tables_t4"),String("CREATE TABLE `05_create_tables_t4` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', [...]
 
 
 -- TIMESTAMP KEY
@@ -133,7 +133,7 @@ 
String("c1"),String("int"),Boolean(false),Boolean(true),Boolean(false),Boolean(f
 show create table `05_create_tables_t5`;
 
 Table,Create Table,
-String("05_create_tables_t5"),String("CREATE TABLE `05_create_tables_t5` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='33554432')"),
+String("05_create_tables_t5"),String("CREATE TABLE `05_create_tables_t5` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE' [...]
 
 
 -- Multiple TIMESTAMP KEYs
@@ -157,7 +157,7 @@ 
String("c1"),String("int"),Boolean(false),Boolean(true),Boolean(false),Boolean(f
 show create table `05_create_tables_t7`;
 
 Table,Create Table,
-String("05_create_tables_t7"),String("CREATE TABLE `05_create_tables_t7` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int COMMENT 'id', PRIMARY 
KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='33554432')"),
+String("05_create_tables_t7"),String("CREATE TABLE `05_create_tables_t7` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int COMMENT 'id', PRIMARY 
KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mod [...]
 
 
 -- StorageFormat
@@ -168,7 +168,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRI [...]
 
 
 drop table `05_create_tables_t8`;
@@ -182,7 +182,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl='7d', update_mode='OVE [...]
 
 
 drop table `05_create_tables_t8`;
@@ -196,7 +196,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl [...]
 
 
 drop table `05_create_tables_t9`;
@@ -210,7 +210,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d [...]
 
 
 drop table `05_create_tables_t9`;
@@ -232,7 +232,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `c2` bigint DEFAULT 
0, `c3` uint32 DEFAULT 1 + 1, `c4` string DEFAULT 'xxx', `c5` uint32 DEFAULT c3 
* 2 + 1, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_form [...]
+String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `c2` bigint DEFAULT 
0, `c3` uint32 DEFAULT 1 + 1, `c4` string DEFAULT 'xxx', `c5` uint32 DEFAULT c3 
* 2 + 1, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplis [...]
 
 
 drop table `05_create_tables_t9`;
@@ -247,7 +247,7 @@ affected_rows: 0
 show create table `05_create_tables_t10`;
 
 Table,Create Table,
-String("05_create_tables_t10"),String("CREATE TABLE `05_create_tables_t10` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t10"),String("CREATE TABLE `05_create_tables_t10` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERW [...]
 
 
 drop table `05_create_tables_t10`;
@@ -262,7 +262,7 @@ affected_rows: 0
 show create table `05_create_tables_t11`;
 
 Table,Create Table,
-String("05_create_tables_t11"),String("CREATE TABLE `05_create_tables_t11` 
(`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY 
KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t11"),String("CREATE TABLE `05_create_tables_t11` 
(`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY 
KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERW [...]
 
 
 drop table `05_create_tables_t11`;
@@ -276,7 +276,7 @@ affected_rows: 0
 show create table `05_create_tables_t12`;
 
 Table,Create Table,
-String("05_create_tables_t12"),String("CREATE TABLE `05_create_tables_t12` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int NOT NULL, PRIMARY 
KEY(tsid,t1,c1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t12"),String("CREATE TABLE `05_create_tables_t12` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int NOT NULL, PRIMARY 
KEY(tsid,t1,c1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update [...]
 
 
 drop table `05_create_tables_t12`;
diff --git a/integration_tests/cases/env/cluster/ddl/partition_table.result 
b/integration_tests/cases/env/cluster/ddl/partition_table.result
index 851f5a82..87f0708a 100644
--- a/integration_tests/cases/env/cluster/ddl/partition_table.result
+++ b/integration_tests/cases/env/cluster/ddl/partition_table.result
@@ -33,7 +33,7 @@ affected_rows: 0
 SHOW CREATE TABLE partition_table_t;
 
 Table,Create Table,
-String("partition_table_t"),String("CREATE TABLE `partition_table_t` (`tsid` 
uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int TAG, 
`value` double NOT NULL, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) PARTITION BY 
KEY(name) PARTITIONS 4 ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='false', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mod [...]
+String("partition_table_t"),String("CREATE TABLE `partition_table_t` (`tsid` 
uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int TAG, 
`value` double NOT NULL, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) PARTITION BY 
KEY(name) PARTITIONS 4 ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='false', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='819 [...]
 
 
 INSERT INTO partition_table_t (t, name, value)
@@ -138,25 +138,25 @@ affected_rows: 0
 SHOW CREATE TABLE __partition_table_t_0;
 
 Table,Create Table,
-String("__partition_table_t_0"),String("CREATE TABLE `__partition_table_t_0` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='2h', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE',  [...]
+String("__partition_table_t_0"),String("CREATE TABLE `__partition_table_t_0` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_durat [...]
 
 
 SHOW CREATE TABLE __partition_table_t_1;
 
 Table,Create Table,
-String("__partition_table_t_1"),String("CREATE TABLE `__partition_table_t_1` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='2h', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE',  [...]
+String("__partition_table_t_1"),String("CREATE TABLE `__partition_table_t_1` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_durat [...]
 
 
 SHOW CREATE TABLE __partition_table_t_2;
 
 Table,Create Table,
-String("__partition_table_t_2"),String("CREATE TABLE `__partition_table_t_2` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='2h', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE',  [...]
+String("__partition_table_t_2"),String("CREATE TABLE `__partition_table_t_2` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_durat [...]
 
 
 SHOW CREATE TABLE __partition_table_t_3;
 
 Table,Create Table,
-String("__partition_table_t_3"),String("CREATE TABLE `__partition_table_t_3` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='2h', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE',  [...]
+String("__partition_table_t_3"),String("CREATE TABLE `__partition_table_t_3` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `name` string TAG, `id` int 
TAG, `value` double NOT NULL, `b` string, PRIMARY KEY(tsid,t), TIMESTAMP 
KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_durat [...]
 
 
 DROP TABLE IF EXISTS `partition_table_t`;
@@ -184,7 +184,7 @@ affected_rows: 0
 SHOW CREATE TABLE random_partition_table_t;
 
 Table,Create Table,
-String("random_partition_table_t"),String("CREATE TABLE 
`random_partition_table_t` (`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, 
`name` string TAG, `id` int TAG, `value` double NOT NULL, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) PARTITION BY RANDOM PARTITIONS 4 ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', [...]
+String("random_partition_table_t"),String("CREATE TABLE 
`random_partition_table_t` (`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, 
`name` string TAG, `id` int TAG, `value` double NOT NULL, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) PARTITION BY RANDOM PARTITIONS 4 ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row [...]
 
 
 INSERT INTO random_partition_table_t (t, name, value)
diff --git a/integration_tests/cases/env/local/ddl/alter_table.result 
b/integration_tests/cases/env/local/ddl/alter_table.result
index 8f2db031..fda3c80a 100644
--- a/integration_tests/cases/env/local/ddl/alter_table.result
+++ b/integration_tests/cases/env/local/ddl/alter_table.result
@@ -20,7 +20,7 @@ DROP TABLE IF EXISTS `05_alter_table_t0`;
 
 affected_rows: 0
 
-CREATE TABLE `05_alter_table_t0`(a int, t timestamp NOT NULL, dic string 
dictionary, TIMESTAMP KEY(t)) ENGINE = Analytic with (enable_ttl='false');
+CREATE TABLE `05_alter_table_t0`(a int, t timestamp NOT NULL, dic string 
dictionary, TIMESTAMP KEY(t)) ENGINE = Analytic with (enable_ttl='false', 
update_mode='OVERWRITE');
 
 affected_rows: 0
 
@@ -117,6 +117,16 @@ 
UInt64(0),Timestamp(2),Int32(2),String("d11"),String("2"),String("d22"),
 UInt64(0),Timestamp(3),Int32(3),String("d22"),String("3"),String("d33"),
 
 
+-- try to enable layered memtable with invalid 0 mutable switch threshold
+ALTER TABLE `05_alter_table_t0` MODIFY SETTING 
layered_enable='true',layered_mutable_switch_threshold='0';
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute alter table, err:Failed to alter table options, 
err:Failed to alter options, table:05_alter_table_t0, err:Found invalid table 
options, reason:layered memtable is enabled but mutable_switch_threshold is 0, 
layered_memtable_opts:LayeredMemtableOptions { enable: true, 
mutable_segment_switch_threshold: ReadableSize(0)  [...]
+
+-- try to enable layered memtable for overwrite mode table
+ALTER TABLE `05_alter_table_t0` MODIFY SETTING 
layered_enable='true',layered_mutable_switch_threshold='3MB';
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute alter table, err:Failed to alter table options, 
err:Failed to alter options, table:05_alter_table_t0, err:Found invalid table 
options, reason:layered memtable is enabled for table needing dedup, 
layered_memtable_opts:LayeredMemtableOptions { enable: true, 
mutable_segment_switch_threshold: ReadableSize(3145728) }, u [...]
+
 DROP TABLE `05_alter_table_t0`;
 
 affected_rows: 0
diff --git a/integration_tests/cases/env/local/ddl/alter_table.sql 
b/integration_tests/cases/env/local/ddl/alter_table.sql
index d686d856..4ecdbe5e 100644
--- a/integration_tests/cases/env/local/ddl/alter_table.sql
+++ b/integration_tests/cases/env/local/ddl/alter_table.sql
@@ -19,7 +19,7 @@
 
 DROP TABLE IF EXISTS `05_alter_table_t0`;
 
-CREATE TABLE `05_alter_table_t0`(a int, t timestamp NOT NULL, dic string 
dictionary, TIMESTAMP KEY(t)) ENGINE = Analytic with (enable_ttl='false');
+CREATE TABLE `05_alter_table_t0`(a int, t timestamp NOT NULL, dic string 
dictionary, TIMESTAMP KEY(t)) ENGINE = Analytic with (enable_ttl='false', 
update_mode='OVERWRITE');
 INSERT INTO TABLE `05_alter_table_t0`(a, t, dic) values(1, 1 , "d1");
 SELECT * FROM `05_alter_table_t0`;
 
@@ -45,4 +45,10 @@ ALTER TABLE `05_alter_table_t0` DROP COLUMN b;
 DESCRIBE TABLE `05_alter_table_t0`;
 SELECT * FROM `05_alter_table_t0`;
 
+-- try to enable layered memtable with invalid 0 mutable switch threshold
+ALTER TABLE `05_alter_table_t0` MODIFY SETTING 
layered_enable='true',layered_mutable_switch_threshold='0';
+
+-- try to enable layered memtable for overwrite mode table
+ALTER TABLE `05_alter_table_t0` MODIFY SETTING 
layered_enable='true',layered_mutable_switch_threshold='3MB';
+
 DROP TABLE `05_alter_table_t0`;
diff --git a/integration_tests/cases/env/local/ddl/create_tables.result 
b/integration_tests/cases/env/local/ddl/create_tables.result
index ffa0a628..0f8e0bc8 100644
--- a/integration_tests/cases/env/local/ddl/create_tables.result
+++ b/integration_tests/cases/env/local/ddl/create_tables.result
@@ -52,6 +52,26 @@ DROP TABLE IF EXISTS `05_create_tables_t9`;
 
 affected_rows: 0
 
+DROP TABLE IF EXISTS `05_create_tables_t10`;
+
+affected_rows: 0
+
+DROP TABLE IF EXISTS `05_create_tables_t11`;
+
+affected_rows: 0
+
+DROP TABLE IF EXISTS `05_timestamp_not_in_primary_key`;
+
+affected_rows: 0
+
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_append`;
+
+affected_rows: 0
+
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_overwrite`;
+
+affected_rows: 0
+
 -- no TIMESTAMP column
 CREATE TABLE `05_create_tables_t`(c1 int) ENGINE = Analytic;
 
@@ -68,7 +88,7 @@ affected_rows: 0
 -- table already exist
 CREATE TABLE `05_create_tables_t`(c1 int, t timestamp NOT NULL, TIMESTAMP 
KEY(t)) ENGINE = Analytic;
 
-Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_create_tables_t [...]
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_create_tables_t [...]
 
 create table `05_create_tables_t2`(a int, b int, t timestamp NOT NULL, 
TIMESTAMP KEY(t)) ENGINE = Analytic with (enable_ttl='false');
 
@@ -88,12 +108,12 @@ Int32(4),
 -- table already exist
 create table `05_create_tables_t2`(a int,b int, t timestamp NOT NULL, 
TIMESTAMP KEY(t)) ENGINE = Analytic;
 
-Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_create_tables_t [...]
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_create_tables_t [...]
 
 -- table already exist
 create table `05_create_tables_t2`(a int,b int, t timestamp NOT NULL, 
TIMESTAMP KEY(t)) ENGINE = Analytic;
 
-Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_create_tables_t [...]
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_create_tables_t [...]
 
 create table `05_create_tables_t3`(a int,b int, t timestamp NOT NULL, 
TIMESTAMP KEY(t)) ENGINE = Analytic;
 
@@ -114,7 +134,7 @@ 
String("a"),String("int"),Boolean(false),Boolean(true),Boolean(false),Boolean(fa
 show create table `05_create_tables_t4`;
 
 Table,Create Table,
-String("05_create_tables_t4"),String("CREATE TABLE `05_create_tables_t4` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='33554432')"),
+String("05_create_tables_t4"),String("CREATE TABLE `05_create_tables_t4` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `a` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', [...]
 
 
 -- TIMESTAMP KEY
@@ -133,7 +153,7 @@ 
String("c1"),String("int"),Boolean(false),Boolean(true),Boolean(false),Boolean(f
 show create table `05_create_tables_t5`;
 
 Table,Create Table,
-String("05_create_tables_t5"),String("CREATE TABLE `05_create_tables_t5` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='33554432')"),
+String("05_create_tables_t5"),String("CREATE TABLE `05_create_tables_t5` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int, PRIMARY KEY(tsid,t), 
TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE' [...]
 
 
 -- Multiple TIMESTAMP KEYs
@@ -157,7 +177,7 @@ 
String("c1"),String("int"),Boolean(false),Boolean(true),Boolean(false),Boolean(f
 show create table `05_create_tables_t7`;
 
 Table,Create Table,
-String("05_create_tables_t7"),String("CREATE TABLE `05_create_tables_t7` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int COMMENT 'id', PRIMARY 
KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mode='OVERWRITE', 
write_buffer_size='33554432')"),
+String("05_create_tables_t7"),String("CREATE TABLE `05_create_tables_t7` 
(`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, `c1` int COMMENT 'id', PRIMARY 
KEY(tsid,t), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', 
compaction_strategy='default', compression='ZSTD', enable_ttl='true', 
layered_enable='false', layered_mutable_switch_threshold='3145728', 
memtable_type='skiplist', num_rows_per_row_group='8192', segment_duration='', 
storage_format='AUTO', ttl='7d', update_mod [...]
 
 
 -- StorageFormat
@@ -168,7 +188,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRI [...]
 
 
 drop table `05_create_tables_t8`;
@@ -182,7 +202,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t8"),String("CREATE TABLE `05_create_tables_t8` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl='7d', update_mode='OVE [...]
 
 
 drop table `05_create_tables_t8`;
@@ -196,7 +216,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', 
ttl [...]
 
 
 drop table `05_create_tables_t9`;
@@ -210,7 +230,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `d` string 
DICTIONARY, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d [...]
 
 
 drop table `05_create_tables_t9`;
@@ -232,7 +252,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `c2` bigint DEFAULT 
0, `c3` uint32 DEFAULT 1 + 1, `c4` string DEFAULT 'xxx', `c5` uint32 DEFAULT c3 
* 2 + 1, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_form [...]
+String("05_create_tables_t9"),String("CREATE TABLE `05_create_tables_t9` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, `c2` bigint DEFAULT 
0, `c3` uint32 DEFAULT 1 + 1, `c4` string DEFAULT 'xxx', `c5` uint32 DEFAULT c3 
* 2 + 1, PRIMARY KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplis [...]
 
 
 drop table `05_create_tables_t9`;
@@ -247,7 +267,7 @@ affected_rows: 0
 show create table `05_create_tables_t10`;
 
 Table,Create Table,
-String("05_create_tables_t10"),String("CREATE TABLE `05_create_tables_t10` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t10"),String("CREATE TABLE `05_create_tables_t10` 
(`tsid` uint64 NOT NULL, `t1` timestamp NOT NULL, `c1` int, PRIMARY 
KEY(tsid,t1), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERW [...]
 
 
 drop table `05_create_tables_t10`;
@@ -262,7 +282,7 @@ affected_rows: 0
 show create table `05_create_tables_t11`;
 
 Table,Create Table,
-String("05_create_tables_t11"),String("CREATE TABLE `05_create_tables_t11` 
(`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY 
KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')"),
+String("05_create_tables_t11"),String("CREATE TABLE `05_create_tables_t11` 
(`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY 
KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic 
WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='true', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO', 
ttl='7d', update_mode='OVERW [...]
 
 
 drop table `05_create_tables_t11`;
@@ -274,6 +294,16 @@ CREATE TABLE `05_timestamp_not_in_primary_key`(c1 int NOT 
NULL, t timestamp NOT
 
 Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
create plan. Caused by: Failed to create plan, err:Failed to build schema, 
err:Timestamp not in primary key. sql:CREATE TABLE 
`05_timestamp_not_in_primary_key`(c1 int NOT NULL, t timestamp NOT NULL, 
TIMESTAMP KEY(t), PRIMARY KEY(c1)) ENGINE = Analytic;" })
 
+-- Valid, try to create append mode table with invalid layered memtable 
enabling
+CREATE TABLE `05_enable_layered_memtable_for_append`(c1 int NOT NULL, t 
timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic with 
(layered_enable='true', layered_mutable_switch_threshold='3MB', 
update_mode='APPEND');
+
+affected_rows: 0
+
+-- Invalid, try to create overwrite mode table with invalid layered memtable 
enabling
+CREATE TABLE `05_enable_layered_memtable_for_overwrite`(c1 int NOT NULL, t 
timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic with 
(layered_enable='true', layered_mutable_switch_threshold='3MB', 
update_mode='OVERWRITE');
+
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to 
execute plan. Caused by: Internal error, msg:Failed to execute interpreter, 
err:Failed to execute create table, err:Failed to create table by table 
manipulator, err:Failed to operate table, err:Failed to operate table, 
msg:Some(\"failed to create table on shard, request:CreateTableRequest { 
params: CreateTableParams { catalog_name: \\\"horaedb\\\", schema_name: 
\\\"public\\\", table_name: \\\"05_enable_layered_ [...]
+
 DROP TABLE IF EXISTS `05_create_tables_t`;
 
 affected_rows: 0
@@ -318,7 +348,15 @@ DROP TABLE IF EXISTS `05_create_tables_t11`;
 
 affected_rows: 0
 
-DROP TABLE IF EXISTS `05_create_tables_t12`;
+DROP TABLE IF EXISTS `05_timestamp_not_in_primary_key`;
+
+affected_rows: 0
+
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_append`;
+
+affected_rows: 0
+
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_overwrite`;
 
 affected_rows: 0
 
diff --git a/integration_tests/cases/env/local/ddl/create_tables.sql 
b/integration_tests/cases/env/local/ddl/create_tables.sql
index 405ec3c5..56936505 100644
--- a/integration_tests/cases/env/local/ddl/create_tables.sql
+++ b/integration_tests/cases/env/local/ddl/create_tables.sql
@@ -26,6 +26,11 @@ DROP TABLE IF EXISTS `05_create_tables_t6`;
 DROP TABLE IF EXISTS `05_create_tables_t7`;
 DROP TABLE IF EXISTS `05_create_tables_t8`;
 DROP TABLE IF EXISTS `05_create_tables_t9`;
+DROP TABLE IF EXISTS `05_create_tables_t10`;
+DROP TABLE IF EXISTS `05_create_tables_t11`;
+DROP TABLE IF EXISTS `05_timestamp_not_in_primary_key`;
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_append`;
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_overwrite`;
 
 -- no TIMESTAMP column
 CREATE TABLE `05_create_tables_t`(c1 int) ENGINE = Analytic;
@@ -106,6 +111,12 @@ drop table `05_create_tables_t11`;
 -- Timestamp not in primary key
 CREATE TABLE `05_timestamp_not_in_primary_key`(c1 int NOT NULL, t timestamp 
NOT NULL, TIMESTAMP KEY(t), PRIMARY KEY(c1)) ENGINE = Analytic;
 
+-- Valid, try to create append mode table with invalid layered memtable 
enabling
+CREATE TABLE `05_enable_layered_memtable_for_append`(c1 int NOT NULL, t 
timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic with 
(layered_enable='true', layered_mutable_switch_threshold='3MB', 
update_mode='APPEND');
+
+-- Invalid, try to create overwrite mode table with invalid layered memtable 
enabling
+CREATE TABLE `05_enable_layered_memtable_for_overwrite`(c1 int NOT NULL, t 
timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic with 
(layered_enable='true', layered_mutable_switch_threshold='3MB', 
update_mode='OVERWRITE');
+
 DROP TABLE IF EXISTS `05_create_tables_t`;
 DROP TABLE IF EXISTS `05_create_tables_t2`;
 DROP TABLE IF EXISTS `05_create_tables_t3`;
@@ -117,4 +128,6 @@ DROP TABLE IF EXISTS `05_create_tables_t8`;
 DROP TABLE IF EXISTS `05_create_tables_t9`;
 DROP TABLE IF EXISTS `05_create_tables_t10`;
 DROP TABLE IF EXISTS `05_create_tables_t11`;
-DROP TABLE IF EXISTS `05_create_tables_t12`;
+DROP TABLE IF EXISTS `05_timestamp_not_in_primary_key`;
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_append`;
+DROP TABLE IF EXISTS `05_enable_layered_memtable_for_overwrite`;
diff --git a/integration_tests/cases/env/local/ddl/sampling-primary-key.result 
b/integration_tests/cases/env/local/ddl/sampling-primary-key.result
index 8ef0dc08..2a804058 100644
--- a/integration_tests/cases/env/local/ddl/sampling-primary-key.result
+++ b/integration_tests/cases/env/local/ddl/sampling-primary-key.result
@@ -38,7 +38,7 @@ affected_rows: 0
 show create table `sampling_primary_key_table`;
 
 Table,Create Table,
-String("sampling_primary_key_table"),String("CREATE TABLE 
`sampling_primary_key_table` (`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, 
`v1` double, `v2` double, `v3` double, `v5` double, `name` string TAG, 
`myVALUE` bigint NOT NULL, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) 
ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='', storage_format='AUTO' [...]
+String("sampling_primary_key_table"),String("CREATE TABLE 
`sampling_primary_key_table` (`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, 
`v1` double, `v2` double, `v3` double, `v5` double, `name` string TAG, 
`myVALUE` bigint NOT NULL, PRIMARY KEY(tsid,t), TIMESTAMP KEY(t)) 
ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='skiplist', num_r 
[...]
 
 
 INSERT INTO `sampling_primary_key_table` (t, name, myVALUE)
@@ -64,7 +64,7 @@ 
UInt64(14649097417416496686),Timestamp(1695348000005),Double(0.0),Double(0.0),Do
 show create table `sampling_primary_key_table`;
 
 Table,Create Table,
-String("sampling_primary_key_table"),String("CREATE TABLE 
`sampling_primary_key_table` (`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, 
`v1` double, `v2` double, `v3` double, `v5` double, `name` string TAG, 
`myVALUE` bigint NOT NULL, PRIMARY KEY(myVALUE,name,tsid,t), TIMESTAMP KEY(t)) 
ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', memtable_type='skiplist', 
num_rows_per_row_group='8192', segment_duration='2h', storag [...]
+String("sampling_primary_key_table"),String("CREATE TABLE 
`sampling_primary_key_table` (`tsid` uint64 NOT NULL, `t` timestamp NOT NULL, 
`v1` double, `v2` double, `v3` double, `v5` double, `name` string TAG, 
`myVALUE` bigint NOT NULL, PRIMARY KEY(myVALUE,name,tsid,t), TIMESTAMP KEY(t)) 
ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', 
compression='ZSTD', enable_ttl='false', layered_enable='false', 
layered_mutable_switch_threshold='3145728', memtable_type='ski [...]
 
 
 select * from `sampling_primary_key_table`;
diff --git a/src/analytic_engine/src/instance/alter.rs 
b/src/analytic_engine/src/instance/alter.rs
index 28d17977..508ac49a 100644
--- a/src/analytic_engine/src/instance/alter.rs
+++ b/src/analytic_engine/src/instance/alter.rs
@@ -30,7 +30,7 @@ use crate::{
         self,
         engine::{
             AlterDroppedTable, EncodePayloads, FlushTable, InvalidOptions, 
InvalidPreVersion,
-            InvalidSchemaVersion, Result, WriteManifest, WriteWal,
+            InvalidSchemaVersion, InvalidTableOptions, Result, WriteManifest, 
WriteWal,
         },
         flush_compaction::TableFlushOptions,
         serial_executor::TableOpSerialExecutor,
@@ -239,6 +239,12 @@ impl<'a> Alterer<'a> {
             opts.sanitize();
             opts
         };
+
+        // We should check the options before altering
+        if let Some(reason) = table_opts.check_validity() {
+            return InvalidTableOptions { reason }.fail();
+        }
+
         let manifest_update = AlterOptionsMeta {
             space_id: self.table_data.space_id,
             table_id: self.table_data.id,
diff --git a/src/analytic_engine/src/instance/create.rs 
b/src/analytic_engine/src/instance/create.rs
index bf47aeb9..232459a4 100644
--- a/src/analytic_engine/src/instance/create.rs
+++ b/src/analytic_engine/src/instance/create.rs
@@ -28,7 +28,7 @@ use table_engine::{
 use crate::{
     instance::{
         engine::{
-            CreateOpenFailedTable, InvalidOptions, Result, TableNotExist,
+            CreateOpenFailedTable, InvalidOptions, InvalidTableOptions, 
Result, TableNotExist,
             TryCreateRandomPartitionTableInOverwriteMode, WriteManifest,
         },
         Instance,
@@ -49,6 +49,10 @@ impl Instance {
                     table: &params.table_name,
                 })?;
 
+        if let Some(reason) = table_opts.check_validity() {
+            return InvalidTableOptions { reason }.fail();
+        }
+
         if let Some(partition_info) = &params.partition_info {
             let dedup_on_random_partition =
                 table_opts.need_dedup() && matches!(partition_info, 
PartitionInfo::Random(_));
diff --git a/src/analytic_engine/src/instance/engine.rs 
b/src/analytic_engine/src/instance/engine.rs
index abea72a8..8c29ab1c 100644
--- a/src/analytic_engine/src/instance/engine.rs
+++ b/src/analytic_engine/src/instance/engine.rs
@@ -243,6 +243,12 @@ pub enum Error {
     ))]
     TryCreateRandomPartitionTableInOverwriteMode { table: String, backtrace: 
Backtrace },
 
+    #[snafu(display("Found invalid table options, 
reason:{reason}.\nBacktrace:\n{backtrace}",))]
+    InvalidTableOptions {
+        reason: String,
+        backtrace: Backtrace,
+    },
+
     #[snafu(display(
         "Failed to purge wal, wal_location:{:?}, sequence:{}",
         wal_location,
@@ -290,7 +296,8 @@ impl From<Error> for table_engine::engine::Error {
             | Error::OpenTablesOfShard { .. }
             | Error::ReplayWalNoCause { .. }
             | Error::PurgeWal { .. }
-            | Error::ReplayWalWithCause { .. } => Self::Unexpected {
+            | Error::ReplayWalWithCause { .. }
+            | Error::InvalidTableOptions { .. } => Self::Unexpected {
                 source: Box::new(err),
             },
         }
diff --git a/src/analytic_engine/src/lib.rs b/src/analytic_engine/src/lib.rs
index c1308d88..e6fa2f85 100644
--- a/src/analytic_engine/src/lib.rs
+++ b/src/analytic_engine/src/lib.rs
@@ -89,10 +89,6 @@ pub struct Config {
     /// should be in the range (0, 1].
     pub preflush_write_buffer_size_ratio: f32,
 
-    /// The threshold to trigger switching mutable segment of memtable.
-    /// If it is zero, disable the layered memtable.
-    pub mutable_segment_switch_threshold: ReadableSize,
-
     pub enable_primary_key_sampling: bool,
 
     // Iterator scanning options
@@ -210,7 +206,6 @@ impl Default for Config {
             remote_engine_client: 
remote_engine_client::config::Config::default(),
             recover_mode: RecoverMode::TableBased,
             metrics: MetricsOptions::default(),
-            mutable_segment_switch_threshold: ReadableSize::mb(3),
         }
     }
 }
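
A trivial, hypothetical check of the default that replaces the removed engine-level
config: layered memtable is now configured purely per table, and the 3 MB default is
carried by `LayeredMemtableOptions::default()` (see the memtable diff below).

    // Not part of this patch; just asserts the per-table default matches the
    // removed engine-level default of 3 MB.
    assert_eq!(
        LayeredMemtableOptions::default()
            .mutable_segment_switch_threshold
            .0,
        ReadableSize::mb(3).0
    );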
diff --git a/src/analytic_engine/src/memtable/mod.rs 
b/src/analytic_engine/src/memtable/mod.rs
index f53bff14..cdb216a4 100644
--- a/src/analytic_engine/src/memtable/mod.rs
+++ b/src/analytic_engine/src/memtable/mod.rs
@@ -26,9 +26,8 @@ mod reversed_iter;
 pub mod skiplist;
 pub mod test_util;
 
-use std::{collections::HashMap, ops::Bound, sync::Arc, time::Instant};
+use std::{ops::Bound, sync::Arc, time::Instant};
 
-use anyhow::Context;
 use bytes_ext::{ByteVec, Bytes};
 use common_types::{
     projected_schema::RowProjectorBuilder,
@@ -36,7 +35,7 @@ use common_types::{
     row::Row,
     schema::{IndexInWriterSchema, Schema},
     time::TimeRange,
-    SequenceNumber, MUTABLE_SEGMENT_SWITCH_THRESHOLD,
+    SequenceNumber,
 };
 pub use error::Error;
 use horaedbproto::manifest;
@@ -82,35 +81,45 @@ impl ToString for MemtableType {
 #[derive(Debug, Clone, Deserialize, PartialEq, Serialize)]
 #[serde(default)]
 pub struct LayeredMemtableOptions {
+    pub enable: bool,
     pub mutable_segment_switch_threshold: ReadableSize,
 }
 
+impl LayeredMemtableOptions {
+    #[inline]
+    pub fn enable_layered_memtable(&self) -> bool {
+        self.enable && self.mutable_segment_switch_threshold.0 > 0
+    }
+}
+
 impl Default for LayeredMemtableOptions {
     fn default() -> Self {
         Self {
+            enable: false,
             mutable_segment_switch_threshold: ReadableSize::mb(3),
         }
     }
 }
 
-impl LayeredMemtableOptions {
-    pub fn parse_from(opts: &HashMap<String, String>) -> Result<Self> {
-        let mut options = LayeredMemtableOptions::default();
-        if let Some(v) = opts.get(MUTABLE_SEGMENT_SWITCH_THRESHOLD) {
-            let threshold = v
-                .parse::<u64>()
-                .with_context(|| format!("invalid mutable segment switch 
threshold:{v}"))?;
-            options.mutable_segment_switch_threshold = ReadableSize(threshold);
-        }
-
-        Ok(options)
-    }
-}
-
 impl From<manifest::LayeredMemtableOptions> for LayeredMemtableOptions {
     fn from(value: manifest::LayeredMemtableOptions) -> Self {
+        // For backward compatibility:
+        // layered memtable used to be enabled by default, so some HoraeDB
+        // services are already running with it in production. Switching to
+        // the new `enable` field in the manifest must not change behavior for
+        // those services, which means `enable` should be assumed true when
+        // the field is absent.
+        // However, the pb version in use cannot declare an explicit default,
+        // and a missing bool always decodes as false, so the manifest stores
+        // `disable` instead of `enable` (disable: false --> enable: true).
+        let enable = !value.disable;
+        let mutable_segment_switch_threshold = 
ReadableSize(value.mutable_segment_switch_threshold);
+
         Self {
-            mutable_segment_switch_threshold: 
ReadableSize(value.mutable_segment_switch_threshold),
+            enable,
+            mutable_segment_switch_threshold,
         }
     }
 }
@@ -119,6 +128,7 @@ impl From<LayeredMemtableOptions> for 
manifest::LayeredMemtableOptions {
     fn from(value: LayeredMemtableOptions) -> Self {
         Self {
             mutable_segment_switch_threshold: 
value.mutable_segment_switch_threshold.0,
+            disable: !value.enable,
         }
     }
 }
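
A minimal sketch of the compatibility mapping above, not part of this patch; it assumes
the `manifest::LayeredMemtableOptions` pb message has only the two fields shown in this
hunk.

    // An old manifest has no `disable` field, so it decodes as false, which
    // maps to `enable: true` and keeps layered memtable on for tables created
    // before this change.
    let old_manifest = manifest::LayeredMemtableOptions {
        disable: false,
        mutable_segment_switch_threshold: 3 * 1024 * 1024,
    };
    let opts = LayeredMemtableOptions::from(old_manifest);
    assert!(opts.enable && opts.enable_layered_memtable());

    // The new default (`enable: false`) round-trips through the manifest as
    // `disable: true`.
    let pb = manifest::LayeredMemtableOptions::from(LayeredMemtableOptions::default());
    assert!(pb.disable);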
diff --git a/src/analytic_engine/src/table/data.rs 
b/src/analytic_engine/src/table/data.rs
index c9ba1662..99e59cba 100644
--- a/src/analytic_engine/src/table/data.rs
+++ b/src/analytic_engine/src/table/data.rs
@@ -44,7 +44,7 @@ use id_allocator::IdAllocator;
 use logger::{debug, info};
 use macros::define_result;
 use object_store::Path;
-use snafu::{Backtrace, OptionExt, ResultExt, Snafu};
+use snafu::{ensure, Backtrace, OptionExt, ResultExt, Snafu};
 use table_engine::table::{SchemaId, TableId};
 use time_ext::ReadableDuration;
 
@@ -95,6 +95,9 @@ pub enum Error {
 
     #[snafu(display("Failed to alloc file id, err:{}", source))]
     AllocFileId { source: GenericError },
+
+    #[snafu(display("Found invalid table opts, 
msg:{msg}.\nBacktrace:\n{backtrace}"))]
+    InvalidTableOpts { msg: String, backtrace: Backtrace },
 }
 
 define_result!(Error);
@@ -323,13 +326,20 @@ impl TableData {
             MemtableType::Column => Arc::new(ColumnarMemTableFactory),
         };
 
-        // Wrap it by `LayeredMemtable`.
-        let mutable_segment_switch_threshold = opts
-            .layered_memtable_opts
-            .mutable_segment_switch_threshold
-            .0 as usize;
-        let enable_layered_memtable = mutable_segment_switch_threshold > 0;
+        let enable_layered_memtable = opts.layered_memtable_opts.enable;
         let memtable_factory = if enable_layered_memtable {
+            let mutable_segment_switch_threshold = opts
+                .layered_memtable_opts
+                .mutable_segment_switch_threshold
+                .0 as usize;
+
+            ensure!(
+                mutable_segment_switch_threshold > 0,
+                InvalidTableOpts {
+                    msg: "layered memtable is enabled but 
mutable_switch_threshold is 0",
+                }
+            );
+
             Arc::new(LayeredMemtableFactory::new(
                 memtable_factory,
                 mutable_segment_switch_threshold,
@@ -403,13 +413,21 @@ impl TableData {
             MemtableType::Column => Arc::new(ColumnarMemTableFactory),
         };
         // Maybe wrap it by `LayeredMemtable`.
-        let mutable_segment_switch_threshold = add_meta
-            .opts
-            .layered_memtable_opts
-            .mutable_segment_switch_threshold
-            .0 as usize;
-        let enable_layered_memtable = mutable_segment_switch_threshold > 0;
+        let enable_layered_memtable = 
add_meta.opts.layered_memtable_opts.enable;
         let memtable_factory = if enable_layered_memtable {
+            let mutable_segment_switch_threshold = add_meta
+                .opts
+                .layered_memtable_opts
+                .mutable_segment_switch_threshold
+                .0 as usize;
+
+            ensure!(
+                mutable_segment_switch_threshold > 0,
+                InvalidTableOpts {
+                    msg: "layered memtable is enabled but 
mutable_switch_threshold is 0",
+                }
+            );
+
             Arc::new(LayeredMemtableFactory::new(
                 memtable_factory,
                 mutable_segment_switch_threshold,
diff --git a/src/analytic_engine/src/table_options.rs 
b/src/analytic_engine/src/table_options.rs
index c5651618..4c1823ee 100644
--- a/src/analytic_engine/src/table_options.rs
+++ b/src/analytic_engine/src/table_options.rs
@@ -17,12 +17,12 @@
 
 //! Constants for table options.
 
-use std::{collections::HashMap, string::ToString, time::Duration};
+use std::{collections::HashMap, str::FromStr, string::ToString, 
time::Duration};
 
 use common_types::{
-    time::Timestamp, ARENA_BLOCK_SIZE, COMPACTION_STRATEGY, COMPRESSION, 
ENABLE_TTL, MEMTABLE_TYPE,
-    NUM_ROWS_PER_ROW_GROUP, OPTION_KEY_ENABLE_TTL, SEGMENT_DURATION, 
STORAGE_FORMAT, TTL,
-    UPDATE_MODE, WRITE_BUFFER_SIZE,
+    time::Timestamp, ARENA_BLOCK_SIZE, COMPACTION_STRATEGY, COMPRESSION, 
ENABLE_TTL,
+    LAYERED_ENABLE, LAYERED_MUTABLE_SWITCH_THRESHOLD, MEMTABLE_TYPE, 
NUM_ROWS_PER_ROW_GROUP,
+    OPTION_KEY_ENABLE_TTL, SEGMENT_DURATION, STORAGE_FORMAT, TTL, UPDATE_MODE, 
WRITE_BUFFER_SIZE,
 };
 use datafusion::parquet::basic::Compression as ParquetCompression;
 use horaedbproto::manifest as manifest_pb;
@@ -140,12 +140,9 @@ pub enum Error {
     HybridDeprecated { backtrace: Backtrace },
 
     #[snafu(display(
-        "Failed to parse layered memtable options, 
err:{source}.\nBacktrace:\n{backtrace}",
+        "Failed to parse layered memtable options, 
msg:{msg}.\nBacktrace:\n{backtrace}",
     ))]
-    ParseLayeredMemtableOptions {
-        source: crate::memtable::Error,
-        backtrace: Backtrace,
-    },
+    ParseLayeredMemtableOptions { msg: String, backtrace: Backtrace },
 
     #[snafu(display("Layered memtable options is 
missing.\nBacktrace:\n{backtrace}",))]
     MissingLayeredMemtableOptions { backtrace: Backtrace },
@@ -468,6 +465,17 @@ impl TableOptions {
                 self.storage_format_hint.to_string(),
             ),
             (MEMTABLE_TYPE.to_string(), self.memtable_type.to_string()),
+            (
+                LAYERED_ENABLE.to_string(),
+                self.layered_memtable_opts.enable.to_string(),
+            ),
+            (
+                LAYERED_MUTABLE_SWITCH_THRESHOLD.to_string(),
+                self.layered_memtable_opts
+                    .mutable_segment_switch_threshold
+                    .0
+                    .to_string(),
+            ),
         ]
         .into_iter()
         .collect();
@@ -476,6 +484,34 @@ impl TableOptions {
         m
     }
 
+    /// Check if the options are valid.
+    /// If invalid, `Some(reason)` is returned; if valid, `None` is returned.
+    pub fn check_validity(&self) -> Option<String> {
+        if self.layered_memtable_opts.enable
+            && self
+                .layered_memtable_opts
+                .mutable_segment_switch_threshold
+                .0
+                == 0
+        {
+            return Some(format!(
+                "layered memtable is enabled but mutable_switch_threshold is 
0, layered_memtable_opts:{:?}",
+                self.layered_memtable_opts,
+            ));
+        }
+
+        // Layered memtable is not supported in overwrite mode.
+        if self.need_dedup() && self.layered_memtable_opts.enable {
+            return Some(format!(
+                "layered memtable is enabled for table needing dedup, 
layered_memtable_opts:{:?}, update_mode:{:?}",
+                self.layered_memtable_opts, self.update_mode,
+            ));
+        }
+
+        None
+    }
+
     /// Sanitize options silently.
     pub fn sanitize(&mut self) {
         let one_day_secs = BUCKET_DURATION_1D.as_secs();
@@ -686,6 +722,7 @@ impl TryFrom<manifest_pb::TableOptions> for TableOptions {
             Some(v) => v.into(),
             None => LayeredMemtableOptions {
                 mutable_segment_switch_threshold: ReadableSize(0),
+                enable: false,
             },
         };
 
@@ -791,10 +828,33 @@ fn merge_table_options(
     if let Some(v) = options.get(MEMTABLE_TYPE) {
         base_table_opts.memtable_type = MemtableType::parse_from(v);
     }
+    if let Some(v) = options.get(LAYERED_ENABLE) {
+        let enable = match v.parse::<bool>() {
+            Ok(v) => v,
+            Err(e) => {
+                return ParseLayeredMemtableOptions {
+                    msg: format!("invalid layered_enable setting, err:{e}"),
+                }
+                .fail()
+            }
+        };
+        base_table_opts.layered_memtable_opts.enable = enable;
+    }
+    if let Some(v) = options.get(LAYERED_MUTABLE_SWITCH_THRESHOLD) {
+        let threshold = match ReadableSize::from_str(v) {
+            Ok(v) => v,
+            Err(e) => {
+                return ParseLayeredMemtableOptions {
+                    msg: format!("invalid layered_mutable_switch_threshold 
setting, err:{e}"),
+                }
+                .fail()
+            }
+        };
 
-    let layered_memtable_opts =
-        
LayeredMemtableOptions::parse_from(options).context(ParseLayeredMemtableOptions)?;
-    base_table_opts.layered_memtable_opts = layered_memtable_opts;
+        base_table_opts
+            .layered_memtable_opts
+            .mutable_segment_switch_threshold = threshold;
+    }
 
     Ok(base_table_opts)
 }
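
A hypothetical sketch of how `check_validity` behaves for the cases named in the test
plan; it assumes `TableOptions: Default`, an `UpdateMode` enum with `Append`/`Overwrite`
variants, and that `need_dedup()` is true only for overwrite mode, none of which are
shown in this hunk.

    let mut opts = TableOptions::default();
    opts.layered_memtable_opts.enable = true;

    // Enabled, but the switch threshold is zero: rejected.
    opts.layered_memtable_opts.mutable_segment_switch_threshold = ReadableSize(0);
    assert!(opts.check_validity().is_some());

    // Enabled for an overwrite (dedup) table: rejected.
    opts.layered_memtable_opts.mutable_segment_switch_threshold = ReadableSize::mb(3);
    opts.update_mode = UpdateMode::Overwrite;
    assert!(opts.check_validity().is_some());

    // Enabled for an append mode table with a positive threshold: accepted.
    opts.update_mode = UpdateMode::Append;
    assert!(opts.check_validity().is_none());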
diff --git a/src/common_types/src/lib.rs b/src/common_types/src/lib.rs
index a92f48d3..0b6cda17 100644
--- a/src/common_types/src/lib.rs
+++ b/src/common_types/src/lib.rs
@@ -53,7 +53,8 @@ pub const UPDATE_MODE: &str = "update_mode";
 pub const COMPRESSION: &str = "compression";
 pub const STORAGE_FORMAT: &str = "storage_format";
 pub const MEMTABLE_TYPE: &str = "memtable_type";
-pub const MUTABLE_SEGMENT_SWITCH_THRESHOLD: &str = 
"mutable_segment_switch_threshold";
+pub const LAYERED_MUTABLE_SWITCH_THRESHOLD: &str = 
"layered_mutable_switch_threshold";
+pub const LAYERED_ENABLE: &str = "layered_enable";
 
 #[cfg(any(test, feature = "test"))]
 pub mod tests;
diff --git a/src/system_catalog/src/sys_catalog_table.rs 
b/src/system_catalog/src/sys_catalog_table.rs
index fd77282a..d87c8ab8 100644
--- a/src/system_catalog/src/sys_catalog_table.rs
+++ b/src/system_catalog/src/sys_catalog_table.rs
@@ -314,7 +314,7 @@ impl SysCatalogTable {
         );
         // Disable layered memtable for system catalog table.
         options.insert(
-            common_types::MUTABLE_SEGMENT_SWITCH_THRESHOLD.to_string(),
+            common_types::LAYERED_MUTABLE_SWITCH_THRESHOLD.to_string(),
             0.to_string(),
         );
         let params = CreateTableParams {
diff --git a/src/table_engine/src/engine.rs b/src/table_engine/src/engine.rs
index 2a9ba800..a9ea1337 100644
--- a/src/table_engine/src/engine.rs
+++ b/src/table_engine/src/engine.rs
@@ -17,7 +17,8 @@
 
 //! Table factory trait
 
-use std::{collections::HashMap, sync::Arc};
+use core::fmt;
+use std::{collections::HashMap, fmt::Debug, sync::Arc};
 
 use async_trait::async_trait;
 use common_types::{
@@ -26,6 +27,7 @@ use common_types::{
 };
 use generic_error::{GenericError, GenericResult};
 use horaedbproto::sys_catalog as sys_catalog_pb;
+use itertools::Itertools;
 use macros::define_result;
 use runtime::{PriorityRuntime, RuntimeRef};
 use snafu::{ensure, Backtrace, Snafu};
@@ -159,7 +161,7 @@ pub enum TableRequestType {
 }
 
 /// The necessary params used to create table.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub struct CreateTableParams {
     pub catalog_name: String,
     pub schema_name: String,
@@ -170,6 +172,30 @@ pub struct CreateTableParams {
     pub engine: String,
 }
 
+impl Debug for CreateTableParams {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let table_opts_formatter = TableOptionsFormatter(&self.table_options);
+        f.debug_struct("CreateTableParams")
+            .field("catalog_name", &self.catalog_name)
+            .field("schema_name", &self.schema_name)
+            .field("table_name", &self.table_name)
+            .field("table_options", &table_opts_formatter)
+            .field("table_schema", &self.table_schema)
+            .field("partition_info", &self.partition_info)
+            .field("engine", &self.engine)
+            .finish()
+    }
+}
+
+struct TableOptionsFormatter<'a>(&'a HashMap<String, String>);
+
+impl<'a> Debug for TableOptionsFormatter<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let sorted_iter = self.0.iter().sorted();
+        f.debug_list().entries(sorted_iter).finish()
+    }
+}
+
 /// Create table request
 // TODO(yingwen): Add option for create_if_not_exists?
 #[derive(Clone, Debug)]
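
A small, hypothetical illustration of the manual `Debug` impl above: `TableOptionsFormatter`
sorts the option map before printing, so messages such as the CreateTableRequest error in
create_tables.result stay deterministic across runs (HashMap iteration order is not). The
expected string assumes only the default `Debug` formatting of string tuples.

    use std::collections::HashMap;

    let mut table_options = HashMap::new();
    table_options.insert("update_mode".to_string(), "OVERWRITE".to_string());
    table_options.insert("layered_enable".to_string(), "true".to_string());

    // Entries come out sorted by key, regardless of insertion or hash order.
    let formatted = format!("{:?}", TableOptionsFormatter(&table_options));
    assert_eq!(
        formatted,
        r#"[("layered_enable", "true"), ("update_mode", "OVERWRITE")]"#
    );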


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
